HBASE-16257 Move staging dir to be under hbase root dir

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/50b051ad
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/50b051ad
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/50b051ad

Branch: refs/heads/hbase-14439
Commit: 50b051ade1d38bba0b936db1c751e5045bc103cb
Parents: d2ed74c
Author: Jerry He <jerry...@apache.org>
Authored: Fri Sep 23 10:07:58 2016 -0700
Committer: Jerry He <jerry...@apache.org>
Committed: Fri Sep 23 10:07:58 2016 -0700

----------------------------------------------------------------------
 .../hbase/client/SecureBulkLoadClient.java      |   5 -
 .../hbase/security/SecureBulkLoadUtil.java      |  46 ------
 .../org/apache/hadoop/hbase/HConstants.java     |   3 +
 .../src/main/resources/hbase-default.xml        |   9 +-
 .../hadoop/hbase/HBaseCommonTestingUtility.java |   1 -
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |   4 +-
 .../hadoop/hbase/master/MasterFileSystem.java   | 141 ++++++++++++++++---
 .../hadoop/hbase/master/MasterWalManager.java   |   5 -
 .../regionserver/SecureBulkLoadManager.java     |  36 ++---
 .../regionserver/HFileReplicator.java           |   7 +-
 .../hbase/util/hbck/HFileCorruptionChecker.java |   8 +-
 .../apache/hadoop/hbase/wal/WALSplitter.java    |   8 +-
 .../hadoop/hbase/HBaseTestingUtility.java       |   1 -
 .../mapreduce/TestLoadIncrementalHFiles.java    |   4 +-
 .../SecureBulkLoadEndpointClient.java           |   4 -
 .../apache/hadoop/hbase/wal/TestWALSplit.java   |   3 +-
 16 files changed, 153 insertions(+), 132 deletions(-)
----------------------------------------------------------------------
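
The net effect: the secure bulk load staging directory moves from the
configurable hbase.bulkload.staging.dir (which defaulted to hbase.fs.tmp.dir)
to a fixed "staging" directory directly under hbase.rootdir. A minimal sketch
of the new derivation, using only the FSUtils and HConstants symbols touched by
this diff (the class and its printout are illustrative, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class WhereIsStaging {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // After this commit the staging dir is pinned at <hbase.rootdir>/staging;
        // hbase.bulkload.staging.dir is no longer consulted (setting it only logs a warning).
        Path staging = new Path(FSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);
        System.out.println("bulk load staging dir: " + staging);
      }
    }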


http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
index f460bdb..eddf8f1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadRe
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.security.token.Token;
 
@@ -125,8 +124,4 @@ public class SecureBulkLoadClient {
       throw ProtobufUtil.handleRemoteException(se);
     }
   }
-
-  public Path getStagingPath(String bulkToken, byte[] family) throws IOException {
-    return SecureBulkLoadUtil.getStagingPath(table.getConfiguration(), bulkToken, family);
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
deleted file mode 100644
index 5af6891..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.security;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.Bytes;
-
-@InterfaceAudience.Private
-public class SecureBulkLoadUtil {
-  private final static String BULKLOAD_STAGING_DIR = "hbase.bulkload.staging.dir";
-
-  /**
-   * This returns the staging path for a given column family.
-   * This is needed for clean recovery and called reflectively in LoadIncrementalHFiles
-   */
-  public static Path getStagingPath(Configuration conf, String bulkToken, byte[] family) {
-    Path stageP = new Path(getBaseStagingDir(conf), bulkToken);
-    return new Path(stageP, Bytes.toString(family));
-  }
-
-  public static Path getBaseStagingDir(Configuration conf) {
-    String hbaseTmpFsDir =
-        conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
-          HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
-    return new Path(conf.get(BULKLOAD_STAGING_DIR, hbaseTmpFsDir));
-  }
-}
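
With SecureBulkLoadUtil gone, nothing reads hbase.bulkload.staging.dir (or its
hbase.fs.tmp.dir fallback) any more. The <staging>/<bulkToken>/<family> layout
the deleted getStagingPath produced survives, rebased onto the root-dir staging
directory. A sketch of the equivalent computation after this change
(stagingPathFor is a hypothetical helper for illustration, not an API added by
the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class StagingPathSketch {
      // Hypothetical helper mirroring the deleted getStagingPath, rebased onto
      // the root-dir staging directory: <rootdir>/staging/<bulkToken>/<family>.
      static Path stagingPathFor(Configuration conf, String bulkToken, byte[] family)
          throws java.io.IOException {
        Path base = new Path(FSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);
        return new Path(new Path(base, bulkToken), Bytes.toString(family));
      }
    }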

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 4a8f55c..4f8facc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -302,6 +302,9 @@ public final class HConstants {
   /** Like the previous, but for old logs that are about to be deleted */
   public static final String HREGION_OLDLOGDIR_NAME = "oldWALs";
 
+  /** Staging dir used by bulk load */
+  public static final String BULKLOAD_STAGING_DIR_NAME = "staging";
+
   public static final String CORRUPT_DIR_NAME = "corrupt";
 
   /** Used by HBCK to sideline backup data */

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-common/src/main/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 5b0700b..4f769cb 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -70,13 +70,6 @@ possible configurations would overwhelm and obscure the important.
     </description>
   </property>
   <property >
-    <name>hbase.bulkload.staging.dir</name>
-    <value>${hbase.fs.tmp.dir}</value>
-    <description>A staging directory in default file system (HDFS)
-    for bulk loading.
-    </description>
-  </property>
-  <property >
     <name>hbase.cluster.distributed</name>
     <value>false</value>
     <description>The mode the cluster will be in. Possible values are
@@ -1190,7 +1183,7 @@ possible configurations would overwhelm and obscure the important.
   <property>
     <name>hbase.rootdir.perms</name>
     <value>700</value>
-    <description>FS Permissions for the root directory in a secure (kerberos) setup.
+    <description>FS Permissions for the root data subdirectory in a secure (kerberos) setup.
     When master starts, it creates the rootdir with this permissions or sets the permissions
     if it does not match.</description>
   </property>

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
index 4e7c8d2..3cae4d2 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
@@ -118,7 +118,6 @@ public class HBaseCommonTestingUtility {
     if (deleteOnExit()) this.dataTestDir.deleteOnExit();
 
     createSubDir("hbase.local.dir", testPath, "hbase-local-dir");
-    createSubDir("hbase.bulkload.staging.dir", testPath, "staging");
 
     return testPath;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 13d1b94..e3542ef8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -133,8 +133,6 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
   private int nrThreads;
   private RpcControllerFactory rpcControllerFactory;
 
-  private LoadIncrementalHFiles() {}
-
   public LoadIncrementalHFiles(Configuration conf) throws Exception {
     super(conf);
     this.rpcControllerFactory = new RpcControllerFactory(conf);
@@ -1199,7 +1197,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
 
   public static void main(String[] args) throws Exception {
     Configuration conf = HBaseConfiguration.create();
-    int ret = ToolRunner.run(conf, new LoadIncrementalHFiles(), args);
+    int ret = ToolRunner.run(conf, new LoadIncrementalHFiles(conf), args);
     System.exit(ret);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 0ce7411..a8f81ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -64,6 +66,25 @@ public class MasterFileSystem {
   // hbase temp directory used for table construction and deletion
   private final Path tempdir;
 
+
+  /*
+   * In a secure env, the protected sub-directories and files under the HBase rootDir
+   * would be restricted. The sub-directory will have '700' except the bulk load staging dir,
+   * which will have '711'.  The default '700' can be overwritten by setting the property
+   * 'hbase.rootdir.perms'. The protected files (version file, clusterId file) will have '600'.
+   * The rootDir itself will be created with HDFS default permissions if it does not exist.
+   * We will check the rootDir permissions to make sure it has 'x' for all to ensure access
+   * to the staging dir. If it does not, we will add it.
+   */
+  // Permissions for the directories under rootDir that need protection
+  private final FsPermission secureRootSubDirPerms;
+  // Permissions for the files under rootDir that need protection
+  private final FsPermission secureRootFilePerms = new FsPermission("600");
+  // Permissions for bulk load staging directory under rootDir
+  private final FsPermission HiddenDirPerms = FsPermission.valueOf("-rwx--x--x");
+
+  private boolean isSecurityEnabled;
+
   private final MasterServices services;
 
   public MasterFileSystem(MasterServices services) throws IOException {
@@ -81,6 +102,8 @@ public class MasterFileSystem {
     FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
     // make sure the fs has the same conf
     fs.setConf(conf);
+    this.secureRootSubDirPerms = new FsPermission(conf.get("hbase.rootdir.perms", "700"));
+    this.isSecurityEnabled = "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication"));
     // setup the filesystem variable
     createInitialFileSystemLayout();
     HFileSystem.addLocationsOrderInterceptor(conf);
@@ -96,11 +119,46 @@ public class MasterFileSystem {
    * Idempotent.
    */
   private void createInitialFileSystemLayout() throws IOException {
+
+    final String[] protectedSubDirs = new String[] {
+        HConstants.BASE_NAMESPACE_DIR,
+        HConstants.HFILE_ARCHIVE_DIRECTORY,
+        HConstants.HREGION_LOGDIR_NAME,
+        HConstants.HREGION_OLDLOGDIR_NAME,
+        MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR,
+        HConstants.CORRUPT_DIR_NAME,
+        HConstants.HBCK_SIDELINEDIR_NAME,
+        MobConstants.MOB_DIR_NAME
+    };
     // check if the root directory exists
     checkRootDir(this.rootdir, conf, this.fs);
 
-    // check if temp directory exists and clean it
+    // Check the directories under rootdir.
     checkTempDir(this.tempdir, conf, this.fs);
+    for (String subDir : protectedSubDirs) {
+      checkSubDir(new Path(this.rootdir, subDir));
+    }
+
+    checkStagingDir();
+
+    // Handle the last few special files and set the final rootDir permissions
+    // rootDir needs 'x' for all to support bulk load staging dir
+    if (isSecurityEnabled) {
+      fs.setPermission(new Path(rootdir, HConstants.VERSION_FILE_NAME), secureRootFilePerms);
+      fs.setPermission(new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
+    }
+    FsPermission currentRootPerms = fs.getFileStatus(this.rootdir).getPermission();
+    if (!currentRootPerms.getUserAction().implies(FsAction.EXECUTE)
+        || !currentRootPerms.getGroupAction().implies(FsAction.EXECUTE)
+        || !currentRootPerms.getOtherAction().implies(FsAction.EXECUTE)) {
+      LOG.warn("rootdir permissions do not contain 'execute' for user, group or other. "
+        + "Automatically adding 'execute' permission for all");
+      fs.setPermission(
+        this.rootdir,
+        new FsPermission(currentRootPerms.getUserAction().or(FsAction.EXECUTE),
+          currentRootPerms.getGroupAction().or(FsAction.EXECUTE),
+          currentRootPerms.getOtherAction().or(FsAction.EXECUTE)));
+    }
   }
 
   public FileSystem getFileSystem() {
@@ -146,17 +204,10 @@ public class MasterFileSystem {
     // If FS is in safe mode wait till out of it.
     FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
 
-    boolean isSecurityEnabled = "kerberos".equalsIgnoreCase(c.get("hbase.security.authentication"));
-    FsPermission rootDirPerms = new FsPermission(c.get("hbase.rootdir.perms", "700"));
-
     // Filesystem is good. Go ahead and check for hbase.rootdir.
     try {
       if (!fs.exists(rd)) {
-        if (isSecurityEnabled) {
-          fs.mkdirs(rd, rootDirPerms);
-        } else {
-          fs.mkdirs(rd);
-        }
+        fs.mkdirs(rd);
         // DFS leaves safe mode with 0 DNs when there are 0 blocks.
         // We used to handle this by checking the current DN count and waiting until
         // it is nonzero. With security, the check for datanode count doesn't work --
@@ -171,16 +222,6 @@ public class MasterFileSystem {
         if (!fs.isDirectory(rd)) {
           throw new IllegalArgumentException(rd.toString() + " is not a directory");
         }
-        if (isSecurityEnabled && !rootDirPerms.equals(fs.getFileStatus(rd).getPermission())) {
-          // check whether the permission match
-          LOG.warn("Found rootdir permissions NOT matching expected \"hbase.rootdir.perms\" for "
-              + "rootdir=" + rd.toString() + " permissions=" + fs.getFileStatus(rd).getPermission()
-              + " and  \"hbase.rootdir.perms\" configured as "
-              + c.get("hbase.rootdir.perms", "700") + ". Automatically setting the permissions. You"
-              + " can change the permissions by setting \"hbase.rootdir.perms\" in hbase-site.xml "
-              + "and restarting the master");
-          fs.setPermission(rd, rootDirPerms);
-        }
         // as above
         FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
           10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
@@ -239,8 +280,66 @@ public class MasterFileSystem {
     }
 
     // Create the temp directory
-    if (!fs.mkdirs(tmpdir)) {
-      throw new IOException("HBase temp directory '" + tmpdir + "' creation 
failure.");
+    if (isSecurityEnabled) {
+      if (!fs.mkdirs(tmpdir, secureRootSubDirPerms)) {
+        throw new IOException("HBase temp directory '" + tmpdir + "' creation 
failure.");
+      }
+    } else {
+      if (!fs.mkdirs(tmpdir)) {
+        throw new IOException("HBase temp directory '" + tmpdir + "' creation 
failure.");
+      }
+    }
+  }
+
+  /**
+   * Make sure the directories under rootDir have good permissions. Create if necessary.
+   * @param p
+   * @throws IOException
+   */
+  private void checkSubDir(final Path p) throws IOException {
+    if (!fs.exists(p)) {
+      if (isSecurityEnabled) {
+        if (!fs.mkdirs(p, secureRootSubDirPerms)) {
+          throw new IOException("HBase directory '" + p + "' creation 
failure.");
+        }
+      } else {
+        if (!fs.mkdirs(p)) {
+          throw new IOException("HBase directory '" + p + "' creation 
failure.");
+        }
+      }
+    }
+    else {
+      if (isSecurityEnabled && !secureRootSubDirPerms.equals(fs.getFileStatus(p).getPermission())) {
+        // check whether the permission match
+        LOG.warn("Found HBase directory permissions NOT matching expected permissions for "
+            + p.toString() + " permissions=" + fs.getFileStatus(p).getPermission()
+            + ", expecting " + secureRootSubDirPerms + ". Automatically setting the permissions. "
+            + "You can change the permissions by setting \"hbase.rootdir.perms\" in hbase-site.xml "
+            + "and restarting the master");
+        fs.setPermission(p, secureRootSubDirPerms);
+      }
+    }
+  }
+
+  /**
+   * Check permissions for bulk load staging directory. This directory has special hidden
+   * permissions. Create it if necessary.
+   * @throws IOException
+   */
+  private void checkStagingDir() throws IOException {
+    Path p = new Path(this.rootdir, HConstants.BULKLOAD_STAGING_DIR_NAME);
+    try {
+      if (!this.fs.exists(p)) {
+        if (!this.fs.mkdirs(p, HiddenDirPerms)) {
+          throw new IOException("Failed to create staging directory " + 
p.toString());
+        }
+      } else {
+        this.fs.setPermission(p, HiddenDirPerms);
+      }
+    } catch (IOException e) {
+      LOG.error("Failed to create or set permission on staging directory " + 
p.toString());
+      throw new IOException("Failed to create or set permission on staging 
directory "
+          + p.toString(), e);
     }
   }
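
The createInitialFileSystemLayout() changes above tighten every protected
subdirectory to hbase.rootdir.perms (default 700) while widening the rootDir
itself so all users keep the execute bit needed to traverse into the 711
staging dir. The same check-then-widen pattern, pulled out as a standalone
sketch (ensureExecuteForAll and the class name are illustrative; the
FsPermission/FsAction calls are the stock Hadoop API the patch uses):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class ExecBitCheck {
      // Widen user/group/other to include 'x' on dir only when one of them
      // lacks it, mirroring the rootDir check added above.
      static void ensureExecuteForAll(FileSystem fs, Path dir) throws java.io.IOException {
        FsPermission perms = fs.getFileStatus(dir).getPermission();
        if (!perms.getUserAction().implies(FsAction.EXECUTE)
            || !perms.getGroupAction().implies(FsAction.EXECUTE)
            || !perms.getOtherAction().implies(FsAction.EXECUTE)) {
          fs.setPermission(dir, new FsPermission(
              perms.getUserAction().or(FsAction.EXECUTE),
              perms.getGroupAction().or(FsAction.EXECUTE),
              perms.getOtherAction().or(FsAction.EXECUTE)));
        }
      }
    }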
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
index 4d19e9e..e287a6d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
@@ -104,11 +104,6 @@ public class MasterWalManager {
     this.distributedLogReplay = this.splitLogManager.isLogReplaying();
 
     this.oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-
-    // Make sure the region servers can archive their old logs
-    if (!this.fs.exists(oldLogDir)) {
-      this.fs.mkdirs(oldLogDir);
-    }
   }
 
   public void stop() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
index 9f53ac5..0d64e4e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
@@ -38,12 +39,12 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequ
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
 import org.apache.hadoop.hbase.regionserver.Region.BulkLoadListener;
-import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.FsDelegationToken;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSHDFSUtils;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Methods;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.Text;
@@ -98,7 +99,6 @@ public class SecureBulkLoadManager {
 
  private final static FsPermission PERM_ALL_ACCESS = FsPermission.valueOf("-rwxrwxrwx");
  private final static FsPermission PERM_HIDDEN = FsPermission.valueOf("-rwx--x--x");
-
   private SecureRandom random;
   private FileSystem fs;
   private Configuration conf;
@@ -113,32 +113,18 @@ public class SecureBulkLoadManager {
     this.conf = conf;
   }
 
-  public void start() {
+  public void start() throws IOException {
     random = new SecureRandom();
-    baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);
-    this.userProvider = UserProvider.instantiate(conf);
+    userProvider = UserProvider.instantiate(conf);
+    fs = FileSystem.get(conf);
+    baseStagingDir = new Path(FSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);
 
-    try {
-      fs = FileSystem.get(conf);
+    if (conf.get("hbase.bulkload.staging.dir") != null) {
+      LOG.warn("hbase.bulkload.staging.dir " + " is deprecated. Bulkload 
staging directory is "
+          + baseStagingDir);
+    }
+    if (!fs.exists(baseStagingDir)) {
       fs.mkdirs(baseStagingDir, PERM_HIDDEN);
-      fs.setPermission(baseStagingDir, PERM_HIDDEN);
-      FileStatus status = fs.getFileStatus(baseStagingDir);
-      //no sticky bit in hadoop-1.0, making directory nonempty so it never gets erased
-      fs.mkdirs(new Path(baseStagingDir,"DONOTERASE"), PERM_HIDDEN);
-      if (status == null) {
-        throw new IllegalStateException("Failed to create staging directory "
-            + baseStagingDir.toString());
-      }
-      if (!status.getPermission().equals(PERM_HIDDEN)) {
-        throw new IllegalStateException(
-            "Staging directory already exists but permissions aren't set to 
'-rwx--x--x' "
-                + baseStagingDir.toString());
-      }
-    } catch (IOException e) {
-      LOG.error("Failed to create or set permission on staging directory "
-          + baseStagingDir.toString(), e);
-      throw new IllegalStateException("Failed to create or set permission on 
staging directory "
-          + baseStagingDir.toString(), e);
     }
   }
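
start() now fails fast (it throws IOException rather than wrapping everything
in IllegalStateException) and only creates the staging dir when the master has
not already done so via checkStagingDir(). The '-rwx--x--x' (711) mode is what
makes a shared staging dir workable, as in this rough sketch (ensureStagingDir
and the class name are illustrative, not code from the patch):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class StagingPerms {
      // "-rwx--x--x" (711): the HBase service user owns the dir with full
      // access; other users can traverse into bulk-load token subdirectories
      // handed to them, but cannot list the staging directory itself.
      static final FsPermission PERM_HIDDEN = FsPermission.valueOf("-rwx--x--x");

      // Illustrative mirror of the new start(): create only if absent, with 711.
      static void ensureStagingDir(FileSystem fs, Path baseStagingDir) throws java.io.IOException {
        if (!fs.exists(baseStagingDir)) {
          fs.mkdirs(baseStagingDir, PERM_HIDDEN);
        }
      }
    }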
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
index 9893e7e..cc02fb6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.FsDelegationToken;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
@@ -82,7 +83,7 @@ public class HFileReplicator {
   private UserProvider userProvider;
   private Configuration conf;
   private Connection connection;
-  private String hbaseStagingDir;
+  private Path hbaseStagingDir;
   private ThreadPoolExecutor exec;
   private int maxCopyThreads;
   private int copiesPerThread;
@@ -100,7 +101,7 @@ public class HFileReplicator {
 
     userProvider = UserProvider.instantiate(conf);
     fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
-    this.hbaseStagingDir = conf.get("hbase.bulkload.staging.dir");
+    this.hbaseStagingDir = new Path(FSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);
     this.maxCopyThreads =
         this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY,
           REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT);
@@ -253,7 +254,7 @@ public class HFileReplicator {
 
         // Create staging directory for each table
         Path stagingDir =
-            createStagingDir(new Path(hbaseStagingDir), user, TableName.valueOf(tableName));
+            createStagingDir(hbaseStagingDir, user, TableName.valueOf(tableName));
 
         familyHFilePathsPairsList = tableEntry.getValue();
         familyHFilePathsPairsListSize = familyHFilePathsPairsList.size();

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
index cc10fad..f7b2c51 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
@@ -141,9 +141,11 @@ public class HFileCorruptionChecker {
     Path regionDir = cfDir.getParent();
     Path tableDir = regionDir.getParent();
 
-    // build up the corrupted dirs strcture
-    Path corruptBaseDir = new Path(FSUtils.getRootDir(conf), conf.get(
-        "hbase.hfile.quarantine.dir", HConstants.CORRUPT_DIR_NAME));
+    // build up the corrupted dirs structure
+    Path corruptBaseDir = new Path(FSUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME);
+    if (conf.get("hbase.hfile.quarantine.dir") != null) {
+      LOG.warn("hbase.hfile.quarantine.dir is deprecated. Default to " + corruptBaseDir);
+    }
     Path corruptTableDir = new Path(corruptBaseDir, tableDir.getName());
     Path corruptRegionDir = new Path(corruptTableDir, regionDir.getName());
     Path corruptFamilyDir = new Path(corruptRegionDir, cfDir.getName());
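
This hunk and the WALSplitter hunk below retire two more location overrides the
same way: the corrupt dir is now fixed under the root dir, and a configured
legacy key only produces a warning. The shared pattern, as a sketch (warnIfSet
is an illustrative helper, not code from the patch):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.conf.Configuration;

    public class DeprecatedKeyCheck {
      private static final Log LOG = LogFactory.getLog(DeprecatedKeyCheck.class);

      // The key no longer changes behavior; a configured value only earns a warning.
      static void warnIfSet(Configuration conf, String deprecatedKey, Object effectiveDir) {
        if (conf.get(deprecatedKey) != null) {
          LOG.warn(deprecatedKey + " is deprecated. Default to " + effectiveDir);
        }
      }
    }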

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 3e27834..04f1b3d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -477,9 +477,11 @@ public class WALSplitter {
       final List<Path> corruptedLogs,
       final List<Path> processedLogs, final Path oldLogDir,
       final FileSystem fs, final Configuration conf) throws IOException {
-    final Path corruptDir = new Path(FSUtils.getRootDir(conf), conf.get(
-        "hbase.regionserver.hlog.splitlog.corrupt.dir",  
HConstants.CORRUPT_DIR_NAME));
-
+    final Path corruptDir = new Path(FSUtils.getRootDir(conf), 
HConstants.CORRUPT_DIR_NAME);
+    if (conf.get("hbase.regionserver.hlog.splitlog.corrupt.dir") != null) {
+      LOG.warn("hbase.regionserver.hlog.splitlog.corrupt.dir is deprecated. 
Default to "
+          + corruptDir);
+    }
     if (!fs.mkdirs(corruptDir)) {
       LOG.info("Unable to mkdir " + corruptDir);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 7f53340..829661c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1282,7 +1282,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     } else {
       LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
     }
-    this.conf.set("hbase.bulkload.staging.dir", 
this.conf.get("hbase.fs.tmp.dir"));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
index cff8005..0b96720 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
@@ -50,10 +50,10 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -342,7 +342,7 @@ public class TestLoadIncrementalHFiles {
     }
 
     // verify staging folder has been cleaned up
-    Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
+    Path stagingBasePath = new Path(FSUtils.getRootDir(util.getConfiguration()), HConstants.BULKLOAD_STAGING_DIR_NAME);
     if(fs.exists(stagingBasePath)) {
       FileStatus[] files = fs.listStatus(stagingBasePath);
       for(FileStatus file : files) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java
index 9ecc5d6..d195347 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos;
-import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.security.token.Token;
@@ -166,7 +165,4 @@ public class SecureBulkLoadEndpointClient {
     }
   }
 
-  public Path getStagingPath(String bulkToken, byte[] family) throws IOException {
-    return SecureBulkLoadUtil.getStagingPath(table.getConfiguration(), bulkToken, family);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
index 3136416..251ec9e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
@@ -1076,8 +1076,7 @@ public class TestWALSplit {
     useDifferentDFSClient();
     WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf, wals);
 
-    final Path corruptDir = new Path(FSUtils.getRootDir(conf), conf.get(
-        "hbase.regionserver.hlog.splitlog.corrupt.dir", 
HConstants.CORRUPT_DIR_NAME));
+    final Path corruptDir = new Path(FSUtils.getRootDir(conf), 
HConstants.CORRUPT_DIR_NAME);
     assertEquals(1, fs.listStatus(corruptDir).length);
   }
 
