HDFS-10553. DiskBalancer: Rename Tools/DiskBalancer class to Tools/DiskBalancerCLI. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35c5943b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35c5943b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35c5943b

Branch: refs/heads/HADOOP-13341
Commit: 35c5943b8ba394191405555cdfc5e6127053ee97
Parents: b07c266
Author: Anu Engineer <aengin...@apache.org>
Authored: Thu Sep 8 19:26:56 2016 -0700
Committer: Anu Engineer <aengin...@apache.org>
Committed: Thu Sep 8 19:26:56 2016 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/src/main/bin/hdfs               |   2 +-
 .../diskbalancer/command/CancelCommand.java     |  23 +-
 .../server/diskbalancer/command/Command.java    |   6 +-
 .../diskbalancer/command/ExecuteCommand.java    |  15 +-
 .../diskbalancer/command/HelpCommand.java       |  22 +-
 .../diskbalancer/command/PlanCommand.java       |  63 +--
 .../diskbalancer/command/QueryCommand.java      |  19 +-
 .../diskbalancer/command/ReportCommand.java     |  18 +-
 .../apache/hadoop/hdfs/tools/DiskBalancer.java  | 482 -------------------
 .../hadoop/hdfs/tools/DiskBalancerCLI.java      | 482 +++++++++++++++++++
 .../command/TestDiskBalancerCommand.java        |  16 +-
 11 files changed, 576 insertions(+), 572 deletions(-)
----------------------------------------------------------------------
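
The rename is source-level only: the "hdfs diskbalancer" subcommand keeps its
name, since bin/hdfs (first hunk below) now maps it to the new class. For
programmatic callers, here is a minimal, hedged sketch of driving the renamed
entry point through ToolRunner, mirroring the DiskBalancerCLI.main() added
below; the demo class name and the "-help" argument are illustrative, not part
of this commit:

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import org.apache.hadoop.util.ToolRunner;

public class DiskBalancerCLIDemo {
  public static void main(String[] args) throws Exception {
    // Same pattern as DiskBalancerCLI.main(): ToolRunner strips generic
    // Hadoop options (-conf, -fs, ...) before the tool parses its own.
    DiskBalancerCLI shell = new DiskBalancerCLI(new HdfsConfiguration());
    int res = ToolRunner.run(shell, new String[] {"-help"});
    System.exit(res);
  }
}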


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 5059528..7a90f08 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -127,7 +127,7 @@ function hdfscmd_case
       HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     diskbalancer)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancer
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancerCLI
       hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
       HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
index 8b83e27..007272e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
 
 import java.io.IOException;
 
@@ -44,9 +44,10 @@ public class CancelCommand extends Command {
    */
   public CancelCommand(Configuration conf) {
     super(conf);
-    addValidCommandParameters(DiskBalancer.CANCEL, "Cancels a running plan.");
-    addValidCommandParameters(DiskBalancer.NODE, "Node to run the command " +
-        "against in node:port format.");
+    addValidCommandParameters(DiskBalancerCLI.CANCEL,
+        "Cancels a running plan.");
+    addValidCommandParameters(DiskBalancerCLI.NODE,
+        "Node to run the command against in node:port format.");
   }
 
   /**
@@ -57,20 +58,20 @@ public class CancelCommand extends Command {
   @Override
   public void execute(CommandLine cmd) throws Exception {
     LOG.info("Executing \"Cancel plan\" command.");
-    Preconditions.checkState(cmd.hasOption(DiskBalancer.CANCEL));
-    verifyCommandOptions(DiskBalancer.CANCEL, cmd);
+    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.CANCEL));
+    verifyCommandOptions(DiskBalancerCLI.CANCEL, cmd);
 
     // We can cancel a plan using datanode address and plan ID
     // that you can read from a datanode using queryStatus
-    if(cmd.hasOption(DiskBalancer.NODE)) {
-      String nodeAddress = cmd.getOptionValue(DiskBalancer.NODE);
-      String planHash = cmd.getOptionValue(DiskBalancer.CANCEL);
+    if(cmd.hasOption(DiskBalancerCLI.NODE)) {
+      String nodeAddress = cmd.getOptionValue(DiskBalancerCLI.NODE);
+      String planHash = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
       cancelPlanUsingHash(nodeAddress, planHash);
     } else {
       // Or you can cancel a plan using the plan file. If the user
       // points us to the plan file, we can compute the hash as well as read
       // the address of the datanode from the plan file.
-      String planFile = cmd.getOptionValue(DiskBalancer.CANCEL);
+      String planFile = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
       Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
           "Invalid plan file specified.");
       String planData = null;
@@ -142,6 +143,6 @@ public class CancelCommand extends Command {
     HelpFormatter helpFormatter = new HelpFormatter();
     helpFormatter.printHelp("hdfs diskbalancer -cancel <planFile> | -cancel " +
         "<planID> -node <hostname>",
-        header, DiskBalancer.getCancelOptions(), footer);
+        header, DiskBalancerCLI.getCancelOptions(), footer);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index 5acd0ac..7641b44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -418,7 +418,7 @@ public abstract class Command extends Configured {
    * @return default top number of nodes.
    */
   protected int getDefaultTop() {
-    return DiskBalancer.DEFAULT_TOP;
+    return DiskBalancerCLI.DEFAULT_TOP;
   }
 
   /**
@@ -437,7 +437,7 @@ public abstract class Command extends Configured {
   protected int parseTopNodes(final CommandLine cmd, final StrBuilder result) {
     String outputLine = "";
     int nodes = 0;
-    final String topVal = cmd.getOptionValue(DiskBalancer.TOP);
+    final String topVal = cmd.getOptionValue(DiskBalancerCLI.TOP);
     if (StringUtils.isBlank(topVal)) {
       outputLine = String.format(
           "No top limit specified, using default top value %d.",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
index f363c34..3a348c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
 
 import java.io.IOException;
 
@@ -46,7 +46,8 @@ public class ExecuteCommand extends Command {
    */
   public ExecuteCommand(Configuration conf) {
     super(conf);
-    addValidCommandParameters(DiskBalancer.EXECUTE, "Executes a given plan.");
+    addValidCommandParameters(DiskBalancerCLI.EXECUTE,
+        "Executes a given plan.");
   }
 
   /**
@@ -57,10 +58,10 @@ public class ExecuteCommand extends Command {
   @Override
   public void execute(CommandLine cmd) throws Exception {
     LOG.info("Executing \"execute plan\" command");
-    Preconditions.checkState(cmd.hasOption(DiskBalancer.EXECUTE));
-    verifyCommandOptions(DiskBalancer.EXECUTE, cmd);
+    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.EXECUTE));
+    verifyCommandOptions(DiskBalancerCLI.EXECUTE, cmd);
 
-    String planFile = cmd.getOptionValue(DiskBalancer.EXECUTE);
+    String planFile = cmd.getOptionValue(DiskBalancerCLI.EXECUTE);
     Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
         "Invalid plan file specified.");
 
@@ -88,7 +89,7 @@ public class ExecuteCommand extends Command {
     String planHash = DigestUtils.shaHex(planData);
     try {
       // TODO : Support skipping date check.
-      dataNode.submitDiskBalancerPlan(planHash, DiskBalancer.PLAN_VERSION,
+      dataNode.submitDiskBalancerPlan(planHash, DiskBalancerCLI.PLAN_VERSION,
                                       planFile, planData, false);
     } catch (DiskBalancerException ex) {
       LOG.error("Submitting plan on  {} failed. Result: {}, Message: {}",
@@ -111,6 +112,6 @@ public class ExecuteCommand extends Command {
 
     HelpFormatter helpFormatter = new HelpFormatter();
     helpFormatter.printHelp("hdfs diskbalancer -execute <planfile>",
-        header, DiskBalancer.getExecuteOptions(), footer);
+        header, DiskBalancerCLI.getExecuteOptions(), footer);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
index 3c2fd0c..c735299 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
@@ -23,7 +23,7 @@ import com.google.common.base.Preconditions;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.HelpFormatter;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
 
 /**
  * Help Command prints out detailed help about each command.
@@ -37,7 +37,7 @@ public class HelpCommand extends Command {
    */
   public HelpCommand(Configuration conf) {
     super(conf);
-    addValidCommandParameters(DiskBalancer.HELP, "Help Command");
+    addValidCommandParameters(DiskBalancerCLI.HELP, "Help Command");
   }
 
   /**
@@ -53,9 +53,9 @@ public class HelpCommand extends Command {
       return;
     }
 
-    Preconditions.checkState(cmd.hasOption(DiskBalancer.HELP));
-    verifyCommandOptions(DiskBalancer.HELP, cmd);
-    String helpCommand = cmd.getOptionValue(DiskBalancer.HELP);
+    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.HELP));
+    verifyCommandOptions(DiskBalancerCLI.HELP, cmd);
+    String helpCommand = cmd.getOptionValue(DiskBalancerCLI.HELP);
     if (helpCommand == null || helpCommand.isEmpty()) {
       this.printHelp();
       return;
@@ -65,19 +65,19 @@ public class HelpCommand extends Command {
     helpCommand = helpCommand.toLowerCase();
     Command command = null;
     switch (helpCommand) {
-    case DiskBalancer.PLAN:
+    case DiskBalancerCLI.PLAN:
       command = new PlanCommand(getConf());
       break;
-    case DiskBalancer.EXECUTE:
+    case DiskBalancerCLI.EXECUTE:
       command = new ExecuteCommand(getConf());
       break;
-    case DiskBalancer.QUERY:
+    case DiskBalancerCLI.QUERY:
       command = new QueryCommand(getConf());
       break;
-    case DiskBalancer.CANCEL:
+    case DiskBalancerCLI.CANCEL:
       command = new CancelCommand(getConf());
       break;
-    case DiskBalancer.REPORT:
+    case DiskBalancerCLI.REPORT:
       command = new ReportCommand(getConf(), null);
       break;
     default:
@@ -102,7 +102,7 @@ public class HelpCommand extends Command {
 
     HelpFormatter helpFormatter = new HelpFormatter();
     helpFormatter.printHelp("hdfs diskbalancer [command] [options]",
-        header, DiskBalancer.getHelpOptions(), "");
+        header, DiskBalancerCLI.getHelpOptions(), "");
   }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index 72ad2c6..9749409 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
 import java.nio.charset.StandardCharsets;
 import java.util.List;
 
@@ -53,18 +53,18 @@ public class PlanCommand extends Command {
     this.thresholdPercentage = 1;
     this.bandwidth = 0;
     this.maxError = 0;
-    addValidCommandParameters(DiskBalancer.OUTFILE, "Output directory in " +
+    addValidCommandParameters(DiskBalancerCLI.OUTFILE, "Output directory in " +
         "HDFS. The generated plan will be written to a file in this " +
         "directory.");
-    addValidCommandParameters(DiskBalancer.BANDWIDTH, "Maximum Bandwidth to " +
-        "be used while copying.");
-    addValidCommandParameters(DiskBalancer.THRESHOLD, "Percentage skew that " +
-        "we tolerate before diskbalancer starts working.");
-    addValidCommandParameters(DiskBalancer.MAXERROR, "Max errors to tolerate " +
-        "between 2 disks");
-    addValidCommandParameters(DiskBalancer.VERBOSE, "Run plan command in " +
+    addValidCommandParameters(DiskBalancerCLI.BANDWIDTH,
+        "Maximum Bandwidth to be used while copying.");
+    addValidCommandParameters(DiskBalancerCLI.THRESHOLD,
+        "Percentage skew that we tolerate before diskbalancer starts working.");
+    addValidCommandParameters(DiskBalancerCLI.MAXERROR,
+        "Max errors to tolerate between 2 disks");
+    addValidCommandParameters(DiskBalancerCLI.VERBOSE, "Run plan command in " +
         "verbose mode.");
-    addValidCommandParameters(DiskBalancer.PLAN, "Plan Command");
+    addValidCommandParameters(DiskBalancerCLI.PLAN, "Plan Command");
   }
 
   /**
@@ -77,36 +77,37 @@ public class PlanCommand extends Command {
   @Override
   public void execute(CommandLine cmd) throws Exception {
     LOG.debug("Processing Plan Command.");
-    Preconditions.checkState(cmd.hasOption(DiskBalancer.PLAN));
-    verifyCommandOptions(DiskBalancer.PLAN, cmd);
+    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.PLAN));
+    verifyCommandOptions(DiskBalancerCLI.PLAN, cmd);
 
-    if (cmd.getOptionValue(DiskBalancer.PLAN) == null) {
+    if (cmd.getOptionValue(DiskBalancerCLI.PLAN) == null) {
       throw new IllegalArgumentException("A node name is required to create a" +
           " plan.");
     }
 
-    if (cmd.hasOption(DiskBalancer.BANDWIDTH)) {
-      this.bandwidth = Integer.parseInt(cmd.getOptionValue(DiskBalancer
+    if (cmd.hasOption(DiskBalancerCLI.BANDWIDTH)) {
+      this.bandwidth = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI
           .BANDWIDTH));
     }
 
-    if (cmd.hasOption(DiskBalancer.MAXERROR)) {
-      this.maxError = Integer.parseInt(cmd.getOptionValue(DiskBalancer
+    if (cmd.hasOption(DiskBalancerCLI.MAXERROR)) {
+      this.maxError = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI
           .MAXERROR));
     }
 
     readClusterInfo(cmd);
     String output = null;
-    if (cmd.hasOption(DiskBalancer.OUTFILE)) {
-      output = cmd.getOptionValue(DiskBalancer.OUTFILE);
+    if (cmd.hasOption(DiskBalancerCLI.OUTFILE)) {
+      output = cmd.getOptionValue(DiskBalancerCLI.OUTFILE);
     }
     setOutputPath(output);
 
     // -plan nodename is the command line argument.
-    DiskBalancerDataNode node = getNode(cmd.getOptionValue(DiskBalancer.PLAN));
+    DiskBalancerDataNode node =
+        getNode(cmd.getOptionValue(DiskBalancerCLI.PLAN));
     if (node == null) {
       throw new IllegalArgumentException("Unable to find the specified node. " +
-          cmd.getOptionValue(DiskBalancer.PLAN));
+          cmd.getOptionValue(DiskBalancerCLI.PLAN));
     }
     this.thresholdPercentage = getThresholdPercentage(cmd);
 
@@ -124,8 +125,8 @@ public class PlanCommand extends Command {
 
 
     try (FSDataOutputStream beforeStream = create(String.format(
-        DiskBalancer.BEFORE_TEMPLATE,
-        cmd.getOptionValue(DiskBalancer.PLAN)))) {
+        DiskBalancerCLI.BEFORE_TEMPLATE,
+        cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
       beforeStream.write(getCluster().toJson()
           .getBytes(StandardCharsets.UTF_8));
     }
@@ -133,17 +134,17 @@ public class PlanCommand extends Command {
     if (plan != null && plan.getVolumeSetPlans().size() > 0) {
       LOG.info("Writing plan to : {}", getOutputPath());
       try (FSDataOutputStream planStream = create(String.format(
-          DiskBalancer.PLAN_TEMPLATE,
-          cmd.getOptionValue(DiskBalancer.PLAN)))) {
+          DiskBalancerCLI.PLAN_TEMPLATE,
+          cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
         planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
       }
     } else {
       LOG.info("No plan generated. DiskBalancing not needed for node: {} " +
-              "threshold used: {}", cmd.getOptionValue(DiskBalancer.PLAN),
+              "threshold used: {}", cmd.getOptionValue(DiskBalancerCLI.PLAN),
           this.thresholdPercentage);
     }
 
-    if (cmd.hasOption(DiskBalancer.VERBOSE) && plans.size() > 0) {
+    if (cmd.hasOption(DiskBalancerCLI.VERBOSE) && plans.size() > 0) {
       printToScreen(plans);
     }
   }
@@ -162,8 +163,8 @@ public class PlanCommand extends Command {
         " will balance the data.";
 
     HelpFormatter helpFormatter = new HelpFormatter();
-    helpFormatter.printHelp("hdfs diskbalancer -plan " +
-        "<hostname> [options]", header, DiskBalancer.getPlanOptions(), footer);
+    helpFormatter.printHelp("hdfs diskbalancer -plan <hostname> [options]",
+        header, DiskBalancerCLI.getPlanOptions(), footer);
   }
 
   /**
@@ -174,8 +175,8 @@ public class PlanCommand extends Command {
    */
   private double getThresholdPercentage(CommandLine cmd) {
     Double value = 0.0;
-    if (cmd.hasOption(DiskBalancer.THRESHOLD)) {
-      value = Double.parseDouble(cmd.getOptionValue(DiskBalancer.THRESHOLD));
+    if (cmd.hasOption(DiskBalancerCLI.THRESHOLD)) {
+      value = Double.parseDouble(cmd.getOptionValue(DiskBalancerCLI.THRESHOLD));
     }
 
     if ((value <= 0.0) || (value > 100.0)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
index 1557a02..a8adcbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
 import org.apache.hadoop.net.NetUtils;
 
 /**
@@ -42,9 +42,10 @@ public class QueryCommand extends Command {
    */
   public QueryCommand(Configuration conf) {
     super(conf);
-    addValidCommandParameters(DiskBalancer.QUERY, "Queries the status of disk" +
-        " plan running on a given datanode.");
-    addValidCommandParameters(DiskBalancer.VERBOSE, "Prints verbose results.");
+    addValidCommandParameters(DiskBalancerCLI.QUERY,
+        "Queries the status of disk plan running on a given datanode.");
+    addValidCommandParameters(DiskBalancerCLI.VERBOSE,
+        "Prints verbose results.");
   }
 
   /**
@@ -55,9 +56,9 @@ public class QueryCommand extends Command {
   @Override
   public void execute(CommandLine cmd) throws Exception {
     LOG.info("Executing \"query plan\" command.");
-    Preconditions.checkState(cmd.hasOption(DiskBalancer.QUERY));
-    verifyCommandOptions(DiskBalancer.QUERY, cmd);
-    String nodeName = cmd.getOptionValue(DiskBalancer.QUERY);
+    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.QUERY));
+    verifyCommandOptions(DiskBalancerCLI.QUERY, cmd);
+    String nodeName = cmd.getOptionValue(DiskBalancerCLI.QUERY);
     Preconditions.checkNotNull(nodeName);
     nodeName = nodeName.trim();
     String nodeAddress = nodeName;
@@ -79,7 +80,7 @@ public class QueryCommand extends Command {
               workStatus.getPlanID(),
               workStatus.getResult().toString());
 
-      if (cmd.hasOption(DiskBalancer.VERBOSE)) {
+      if (cmd.hasOption(DiskBalancerCLI.VERBOSE)) {
         System.out.printf("%s", workStatus.currentStateString());
       }
     } catch (DiskBalancerException ex) {
@@ -101,6 +102,6 @@ public class QueryCommand extends Command {
 
     HelpFormatter helpFormatter = new HelpFormatter();
     helpFormatter.printHelp("hdfs diskbalancer -query <hostname>  [options]",
-        header, DiskBalancer.getQueryOptions(), footer);
+        header, DiskBalancerCLI.getQueryOptions(), footer);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
index 18dd77e..69b765f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -52,15 +52,15 @@ public class ReportCommand extends Command {
     super(conf);
     this.out = out;
 
-    addValidCommandParameters(DiskBalancer.REPORT,
+    addValidCommandParameters(DiskBalancerCLI.REPORT,
         "Report volume information of nodes.");
 
     String desc = String.format(
         "Top number of nodes to be processed. Default: %d", getDefaultTop());
-    addValidCommandParameters(DiskBalancer.TOP, desc);
+    addValidCommandParameters(DiskBalancerCLI.TOP, desc);
 
     desc = String.format("Print out volume information for a DataNode.");
-    addValidCommandParameters(DiskBalancer.NODE, desc);
+    addValidCommandParameters(DiskBalancerCLI.NODE, desc);
   }
 
   @Override
@@ -69,8 +69,8 @@ public class ReportCommand extends Command {
     String outputLine = "Processing report command";
     recordOutput(result, outputLine);
 
-    Preconditions.checkState(cmd.hasOption(DiskBalancer.REPORT));
-    verifyCommandOptions(DiskBalancer.REPORT, cmd);
+    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.REPORT));
+    verifyCommandOptions(DiskBalancerCLI.REPORT, cmd);
     readClusterInfo(cmd);
 
     final String nodeFormat =
@@ -81,7 +81,7 @@ public class ReportCommand extends Command {
         "[%s: volume-%s] - %.2f used: %d/%d, %.2f free: %d/%d, "
         + "isFailed: %s, isReadOnly: %s, isSkip: %s, isTransient: %s.";
 
-    if (cmd.hasOption(DiskBalancer.NODE)) {
+    if (cmd.hasOption(DiskBalancerCLI.NODE)) {
       /*
        * Reporting volume information for a specific DataNode
        */
@@ -136,7 +136,7 @@ public class ReportCommand extends Command {
     * get value that identifies a DataNode from command line, it could be UUID,
      * IP address or host name.
      */
-    final String nodeVal = cmd.getOptionValue(DiskBalancer.NODE);
+    final String nodeVal = cmd.getOptionValue(DiskBalancerCLI.NODE);
 
     if (StringUtils.isBlank(nodeVal)) {
       outputLine = "The value for '-node' is neither specified or empty.";
@@ -211,6 +211,6 @@ public class ReportCommand extends Command {
     HelpFormatter helpFormatter = new HelpFormatter();
     helpFormatter.printHelp("hdfs diskbalancer -fs http://namenode.uri " +
         "-report [options]",
-        header, DiskBalancer.getReportOptions(), footer);
+        header, DiskBalancerCLI.getReportOptions(), footer);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
deleted file mode 100644
index 1ed2fdc..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
+++ /dev/null
@@ -1,482 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdfs.tools;
-
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.OptionBuilder;
-import org.apache.commons.cli.Options;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.server.diskbalancer.command.CancelCommand;
-import org.apache.hadoop.hdfs.server.diskbalancer.command.Command;
-import org.apache.hadoop.hdfs.server.diskbalancer.command.ExecuteCommand;
-import org.apache.hadoop.hdfs.server.diskbalancer.command.HelpCommand;
-import org.apache.hadoop.hdfs.server.diskbalancer.command.PlanCommand;
-import org.apache.hadoop.hdfs.server.diskbalancer.command.QueryCommand;
-import org.apache.hadoop.hdfs.server.diskbalancer.command.ReportCommand;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.PrintStream;
-
-/**
- * DiskBalancer is a tool that can be used to ensure that data is spread evenly
- * across volumes of same storage type.
- * <p>
- * For example, if you have 3 disks, with 100 GB , 600 GB and 200 GB on each
- * disk, this tool will ensure that each disk will have 300 GB.
- * <p>
- * This tool can be run while data nodes are fully functional.
- * <p>
- * At very high level diskbalancer computes a set of moves that will make disk
- * utilization equal and then those moves are executed by the datanode.
- */
-public class DiskBalancer extends Configured implements Tool {
-  /**
-   * Computes a plan for a given set of nodes.
-   */
-  public static final String PLAN = "plan";
-  /**
-   * Output file name, for commands like report, plan etc. This is an optional
-   * argument, by default diskbalancer will write all its output to
-   * /system/reports/diskbalancer of the current cluster it is operating
-   * against.
-   */
-  public static final String OUTFILE = "out";
-  /**
-   * Help for the program.
-   */
-  public static final String HELP = "help";
-  /**
-   * Percentage of data unevenness that we are willing to live with. For example
-   * - a value like 10 indicates that we are okay with 10 % +/- from
-   * idealStorage Target.
-   */
-  public static final String THRESHOLD = "thresholdPercentage";
-  /**
-   * Specifies the maximum disk bandwidth to use per second.
-   */
-  public static final String BANDWIDTH = "bandwidth";
-  /**
-   * Specifies the maximum errors to tolerate.
-   */
-  public static final String MAXERROR = "maxerror";
-  /**
-   * Executes a given plan file on the target datanode.
-   */
-  public static final String EXECUTE = "execute";
-  /**
-   * The report command prints out a disk fragmentation report about the data
-   * cluster. By default it prints the DEFAULT_TOP machines names with high
-   * nodeDataDensity {DiskBalancerDataNode#getNodeDataDensity} values. This
-   * means that these are the nodes that deviates from the ideal data
-   * distribution.
-   */
-  public static final String REPORT = "report";
-  /**
-   * specify top number of nodes to be processed.
-   */
-  public static final String TOP = "top";
-  /**
-   * specify default top number of nodes to be processed.
-   */
-  public static final int DEFAULT_TOP = 100;
-  /**
-   * Name or address of the node to execute against.
-   */
-  public static final String NODE = "node";
-  /**
-   * Runs the command in verbose mode.
-   */
-  public static final String VERBOSE = "v";
-  public static final int PLAN_VERSION = 1;
-  /**
-   * Reports the status of disk balancer operation.
-   */
-  public static final String QUERY = "query";
-  /**
-   * Cancels a running plan.
-   */
-  public static final String CANCEL = "cancel";
-  /**
-   * Template for the Before File. It is node.before.json.
-   */
-  public static final String BEFORE_TEMPLATE = "%s.before.json";
-  /**
-   * Template for the plan file. it is node.plan.json.
-   */
-  public static final String PLAN_TEMPLATE = "%s.plan.json";
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DiskBalancer.class);
-
-  private static final Options PLAN_OPTIONS = new Options();
-  private static final Options EXECUTE_OPTIONS = new Options();
-  private static final Options QUERY_OPTIONS = new Options();
-  private static final Options HELP_OPTIONS = new Options();
-  private static final Options CANCEL_OPTIONS = new Options();
-  private static final Options REPORT_OPTIONS = new Options();
-
-  /**
-   * Construct a DiskBalancer.
-   *
-   * @param conf
-   */
-  public DiskBalancer(Configuration conf) {
-    super(conf);
-  }
-
-  /**
-   * Main for the  DiskBalancer Command handling.
-   *
-   * @param argv - System Args Strings[]
-   * @throws Exception
-   */
-  public static void main(String[] argv) throws Exception {
-    DiskBalancer shell = new DiskBalancer(new HdfsConfiguration());
-    int res = 0;
-    try {
-      res = ToolRunner.run(shell, argv);
-    } catch (Exception ex) {
-      LOG.error(ex.toString());
-      res = 1;
-    }
-    System.exit(res);
-  }
-
-  /**
-   * Execute the command with the given arguments.
-   *
-   * @param args command specific arguments.
-   * @return exit code.
-   * @throws Exception
-   */
-  @Override
-  public int run(String[] args) throws Exception {
-    return run(args, System.out);
-  }
-
-  /**
-   * Execute the command with the given arguments.
-   *
-   * @param args command specific arguments.
-   * @param out  the output stream used for printing
-   * @return exit code.
-   * @throws Exception
-   */
-  public int run(String[] args, final PrintStream out) throws Exception {
-    Options opts = getOpts();
-    CommandLine cmd = parseArgs(args, opts);
-    return dispatch(cmd, opts, out);
-  }
-
-  /**
-   * returns the Command Line Options.
-   *
-   * @return Options
-   */
-  private Options getOpts() {
-    Options opts = new Options();
-    addPlanCommands(opts);
-    addHelpCommands(opts);
-    addExecuteCommands(opts);
-    addQueryCommands(opts);
-    addCancelCommands(opts);
-    addReportCommands(opts);
-    return opts;
-  }
-
-  /**
-   * Returns Plan options.
-   *
-   * @return Options.
-   */
-  public static Options getPlanOptions() {
-    return PLAN_OPTIONS;
-  }
-
-  /**
-   * Returns help options.
-   *
-   * @return - help options.
-   */
-  public static Options getHelpOptions() {
-    return HELP_OPTIONS;
-  }
-
-  /**
-   * Retuns execute options.
-   *
-   * @return - execute options.
-   */
-  public static Options getExecuteOptions() {
-    return EXECUTE_OPTIONS;
-  }
-
-  /**
-   * Returns Query Options.
-   *
-   * @return query Options
-   */
-  public static Options getQueryOptions() {
-    return QUERY_OPTIONS;
-  }
-
-  /**
-   * Returns Cancel Options.
-   *
-   * @return Options
-   */
-  public static Options getCancelOptions() {
-    return CANCEL_OPTIONS;
-  }
-
-  /**
-   * Returns Report Options.
-   *
-   * @return Options
-   */
-  public static Options getReportOptions() {
-    return REPORT_OPTIONS;
-  }
-
-  /**
-   * Adds commands for plan command.
-   *
-   * @return Options.
-   */
-  private void addPlanCommands(Options opt) {
-
-    Option plan = OptionBuilder.withLongOpt(PLAN)
-        .withDescription("Hostname, IP address or UUID of datanode " +
-            "for which a plan is created.")
-        .hasArg()
-        .create();
-    getPlanOptions().addOption(plan);
-    opt.addOption(plan);
-
-
-    Option outFile = OptionBuilder.withLongOpt(OUTFILE).hasArg()
-        .withDescription(
-            "Local path of file to write output to, if not specified "
-                + "defaults will be used.")
-        .create();
-    getPlanOptions().addOption(outFile);
-    opt.addOption(outFile);
-
-    Option bandwidth = OptionBuilder.withLongOpt(BANDWIDTH).hasArg()
-        .withDescription(
-            "Maximum disk bandwidth (MB/s) in integer to be consumed by "
-                + "diskBalancer. e.g. 10 MB/s.")
-        .create();
-    getPlanOptions().addOption(bandwidth);
-    opt.addOption(bandwidth);
-
-    Option threshold = OptionBuilder.withLongOpt(THRESHOLD)
-        .hasArg()
-        .withDescription("Percentage of data skew that is tolerated before"
-            + " disk balancer starts working. For example, if"
-            + " total data on a 2 disk node is 100 GB then disk"
-            + " balancer calculates the expected value on each disk,"
-            + " which is 50 GB. If the tolerance is 10% then data"
-            + " on a single disk needs to be more than 60 GB"
-            + " (50 GB + 10% tolerance value) for Disk balancer to"
-            + " balance the disks.")
-        .create();
-    getPlanOptions().addOption(threshold);
-    opt.addOption(threshold);
-
-
-    Option maxError = OptionBuilder.withLongOpt(MAXERROR)
-        .hasArg()
-        .withDescription("Describes how many errors " +
-            "can be tolerated while copying between a pair of disks.")
-        .create();
-    getPlanOptions().addOption(maxError);
-    opt.addOption(maxError);
-
-    Option verbose = OptionBuilder.withLongOpt(VERBOSE)
-        .withDescription("Print out the summary of the plan on console")
-        .create();
-    getPlanOptions().addOption(verbose);
-    opt.addOption(verbose);
-  }
-
-  /**
-   * Adds Help to the options.
-   */
-  private void addHelpCommands(Options opt) {
-    Option help = OptionBuilder.withLongOpt(HELP)
-        .hasOptionalArg()
-        .withDescription("valid commands are plan | execute | query | cancel" +
-            " | report")
-        .create();
-    getHelpOptions().addOption(help);
-    opt.addOption(help);
-  }
-
-  /**
-   * Adds execute command options.
-   *
-   * @param opt Options
-   */
-  private void addExecuteCommands(Options opt) {
-    Option execute = OptionBuilder.withLongOpt(EXECUTE)
-        .hasArg()
-        .withDescription("Takes a plan file and " +
-            "submits it for execution by the datanode.")
-        .create();
-    getExecuteOptions().addOption(execute);
-    opt.addOption(execute);
-  }
-
-  /**
-   * Adds query command options.
-   *
-   * @param opt Options
-   */
-  private void addQueryCommands(Options opt) {
-    Option query = OptionBuilder.withLongOpt(QUERY)
-        .hasArg()
-        .withDescription("Queries the disk balancer " +
-            "status of a given datanode.")
-        .create();
-    getQueryOptions().addOption(query);
-    opt.addOption(query);
-
-    // Please note: Adding this only to Query options since -v is already
-    // added to global table.
-    Option verbose = OptionBuilder.withLongOpt(VERBOSE)
-        .withDescription("Prints details of the plan that is being executed " +
-            "on the node.")
-        .create();
-    getQueryOptions().addOption(verbose);
-  }
-
-  /**
-   * Adds cancel command options.
-   *
-   * @param opt Options
-   */
-  private void addCancelCommands(Options opt) {
-    Option cancel = OptionBuilder.withLongOpt(CANCEL)
-        .hasArg()
-        .withDescription("Cancels a running plan using a plan file.")
-        .create();
-    getCancelOptions().addOption(cancel);
-    opt.addOption(cancel);
-
-    Option node = OptionBuilder.withLongOpt(NODE)
-        .hasArg()
-        .withDescription("Cancels a running plan using a plan ID and hostName")
-        .create();
-
-    getCancelOptions().addOption(node);
-    opt.addOption(node);
-  }
-
-  /**
-   * Adds report command options.
-   *
-   * @param opt Options
-   */
-  private void addReportCommands(Options opt) {
-    Option report = OptionBuilder.withLongOpt(REPORT)
-        .withDescription("List nodes that will benefit from running " +
-            "DiskBalancer.")
-        .create();
-    getReportOptions().addOption(report);
-    opt.addOption(report);
-
-    Option top = OptionBuilder.withLongOpt(TOP)
-        .hasArg()
-        .withDescription("specify the number of nodes to be listed which has" +
-            " data imbalance.")
-        .create();
-    getReportOptions().addOption(top);
-    opt.addOption(top);
-
-    Option node =  OptionBuilder.withLongOpt(NODE)
-        .hasArg()
-        .withDescription("Datanode address, " +
-            "it can be DataNodeID, IP or hostname.")
-        .create();
-    getReportOptions().addOption(node);
-    opt.addOption(node);
-  }
-
-  /**
-   * This function parses all command line arguments and returns the appropriate
-   * values.
-   *
-   * @param argv - Argv from main
-   * @return CommandLine
-   */
-  private CommandLine parseArgs(String[] argv, Options opts)
-      throws org.apache.commons.cli.ParseException {
-    BasicParser parser = new BasicParser();
-    return parser.parse(opts, argv);
-  }
-
-  /**
-   * Dispatches calls to the right command Handler classes.
-   *
-   * @param cmd  - CommandLine
-   * @param opts options of command line
-   * @param out  the output stream used for printing
-   */
-  private int dispatch(CommandLine cmd, Options opts, final PrintStream out)
-      throws Exception {
-    Command currentCommand = null;
-    if (cmd.hasOption(DiskBalancer.PLAN)) {
-      currentCommand = new PlanCommand(getConf());
-    }
-
-    if (cmd.hasOption(DiskBalancer.EXECUTE)) {
-      currentCommand = new ExecuteCommand(getConf());
-    }
-
-    if (cmd.hasOption(DiskBalancer.QUERY)) {
-      currentCommand = new QueryCommand(getConf());
-    }
-
-    if (cmd.hasOption(DiskBalancer.CANCEL)) {
-      currentCommand = new CancelCommand(getConf());
-    }
-
-    if (cmd.hasOption(DiskBalancer.REPORT)) {
-      currentCommand = new ReportCommand(getConf(), out);
-    }
-
-    if (cmd.hasOption(DiskBalancer.HELP)) {
-      currentCommand = new HelpCommand(getConf());
-    }
-
-    // Invoke main help here.
-    if (currentCommand == null) {
-      new HelpCommand(getConf()).execute(null);
-      return 1;
-    }
-
-    currentCommand.execute(cmd);
-    return 0;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java
new file mode 100644
index 0000000..e961c14
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java
@@ -0,0 +1,482 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import org.apache.commons.cli.BasicParser;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.diskbalancer.command.CancelCommand;
+import org.apache.hadoop.hdfs.server.diskbalancer.command.Command;
+import org.apache.hadoop.hdfs.server.diskbalancer.command.ExecuteCommand;
+import org.apache.hadoop.hdfs.server.diskbalancer.command.HelpCommand;
+import org.apache.hadoop.hdfs.server.diskbalancer.command.PlanCommand;
+import org.apache.hadoop.hdfs.server.diskbalancer.command.QueryCommand;
+import org.apache.hadoop.hdfs.server.diskbalancer.command.ReportCommand;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.PrintStream;
+
+/**
+ * DiskBalancer is a tool that can be used to ensure that data is spread evenly
+ * across volumes of same storage type.
+ * <p>
+ * For example, if you have 3 disks, with 100 GB , 600 GB and 200 GB on each
+ * disk, this tool will ensure that each disk will have 300 GB.
+ * <p>
+ * This tool can be run while data nodes are fully functional.
+ * <p>
+ * At very high level diskbalancer computes a set of moves that will make disk
+ * utilization equal and then those moves are executed by the datanode.
+ */
+public class DiskBalancerCLI extends Configured implements Tool {
+  /**
+   * Computes a plan for a given set of nodes.
+   */
+  public static final String PLAN = "plan";
+  /**
+   * Output file name, for commands like report, plan etc. This is an optional
+   * argument, by default diskbalancer will write all its output to
+   * /system/reports/diskbalancer of the current cluster it is operating
+   * against.
+   */
+  public static final String OUTFILE = "out";
+  /**
+   * Help for the program.
+   */
+  public static final String HELP = "help";
+  /**
+   * Percentage of data unevenness that we are willing to live with. For example
+   * - a value like 10 indicates that we are okay with 10 % +/- from
+   * idealStorage Target.
+   */
+  public static final String THRESHOLD = "thresholdPercentage";
+  /**
+   * Specifies the maximum disk bandwidth to use per second.
+   */
+  public static final String BANDWIDTH = "bandwidth";
+  /**
+   * Specifies the maximum errors to tolerate.
+   */
+  public static final String MAXERROR = "maxerror";
+  /**
+   * Executes a given plan file on the target datanode.
+   */
+  public static final String EXECUTE = "execute";
+  /**
+   * The report command prints out a disk fragmentation report about the data
+   * cluster. By default it prints the DEFAULT_TOP machines names with high
+   * nodeDataDensity {DiskBalancerDataNode#getNodeDataDensity} values. This
+   * means that these are the nodes that deviates from the ideal data
+   * distribution.
+   */
+  public static final String REPORT = "report";
+  /**
+   * specify top number of nodes to be processed.
+   */
+  public static final String TOP = "top";
+  /**
+   * specify default top number of nodes to be processed.
+   */
+  public static final int DEFAULT_TOP = 100;
+  /**
+   * Name or address of the node to execute against.
+   */
+  public static final String NODE = "node";
+  /**
+   * Runs the command in verbose mode.
+   */
+  public static final String VERBOSE = "v";
+  public static final int PLAN_VERSION = 1;
+  /**
+   * Reports the status of disk balancer operation.
+   */
+  public static final String QUERY = "query";
+  /**
+   * Cancels a running plan.
+   */
+  public static final String CANCEL = "cancel";
+  /**
+   * Template for the Before File. It is node.before.json.
+   */
+  public static final String BEFORE_TEMPLATE = "%s.before.json";
+  /**
+   * Template for the plan file. it is node.plan.json.
+   */
+  public static final String PLAN_TEMPLATE = "%s.plan.json";
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DiskBalancerCLI.class);
+
+  private static final Options PLAN_OPTIONS = new Options();
+  private static final Options EXECUTE_OPTIONS = new Options();
+  private static final Options QUERY_OPTIONS = new Options();
+  private static final Options HELP_OPTIONS = new Options();
+  private static final Options CANCEL_OPTIONS = new Options();
+  private static final Options REPORT_OPTIONS = new Options();
+
+  /**
+   * Construct a DiskBalancer.
+   *
+   * @param conf
+   */
+  public DiskBalancerCLI(Configuration conf) {
+    super(conf);
+  }
+
+  /**
+   * Main for the  DiskBalancer Command handling.
+   *
+   * @param argv - System Args Strings[]
+   * @throws Exception
+   */
+  public static void main(String[] argv) throws Exception {
+    DiskBalancerCLI shell = new DiskBalancerCLI(new HdfsConfiguration());
+    int res = 0;
+    try {
+      res = ToolRunner.run(shell, argv);
+    } catch (Exception ex) {
+      LOG.error(ex.toString());
+      res = 1;
+    }
+    System.exit(res);
+  }
+
+  /**
+   * Execute the command with the given arguments.
+   *
+   * @param args command specific arguments.
+   * @return exit code.
+   * @throws Exception
+   */
+  @Override
+  public int run(String[] args) throws Exception {
+    return run(args, System.out);
+  }
+
+  /**
+   * Execute the command with the given arguments.
+   *
+   * @param args command specific arguments.
+   * @param out  the output stream used for printing
+   * @return exit code.
+   * @throws Exception
+   */
+  public int run(String[] args, final PrintStream out) throws Exception {
+    Options opts = getOpts();
+    CommandLine cmd = parseArgs(args, opts);
+    return dispatch(cmd, opts, out);
+  }
+
+  /**
+   * returns the Command Line Options.
+   *
+   * @return Options
+   */
+  private Options getOpts() {
+    Options opts = new Options();
+    addPlanCommands(opts);
+    addHelpCommands(opts);
+    addExecuteCommands(opts);
+    addQueryCommands(opts);
+    addCancelCommands(opts);
+    addReportCommands(opts);
+    return opts;
+  }
+
+  /**
+   * Returns Plan options.
+   *
+   * @return Options.
+   */
+  public static Options getPlanOptions() {
+    return PLAN_OPTIONS;
+  }
+
+  /**
+   * Returns help options.
+   *
+   * @return - help options.
+   */
+  public static Options getHelpOptions() {
+    return HELP_OPTIONS;
+  }
+
+  /**
+   * Retuns execute options.
+   *
+   * @return - execute options.
+   */
+  public static Options getExecuteOptions() {
+    return EXECUTE_OPTIONS;
+  }
+
+  /**
+   * Returns Query Options.
+   *
+   * @return query Options
+   */
+  public static Options getQueryOptions() {
+    return QUERY_OPTIONS;
+  }
+
+  /**
+   * Returns Cancel Options.
+   *
+   * @return Options
+   */
+  public static Options getCancelOptions() {
+    return CANCEL_OPTIONS;
+  }
+
+  /**
+   * Returns Report Options.
+   *
+   * @return Options
+   */
+  public static Options getReportOptions() {
+    return REPORT_OPTIONS;
+  }
+
+  /**
+   * Adds commands for plan command.
+   *
+   * @return Options.
+   */
+  private void addPlanCommands(Options opt) {
+
+    Option plan = OptionBuilder.withLongOpt(PLAN)
+        .withDescription("Hostname, IP address or UUID of datanode " +
+            "for which a plan is created.")
+        .hasArg()
+        .create();
+    getPlanOptions().addOption(plan);
+    opt.addOption(plan);
+
+
+    Option outFile = OptionBuilder.withLongOpt(OUTFILE).hasArg()
+        .withDescription(
+            "Local path of file to write output to, if not specified "
+                + "defaults will be used.")
+        .create();
+    getPlanOptions().addOption(outFile);
+    opt.addOption(outFile);
+
+    Option bandwidth = OptionBuilder.withLongOpt(BANDWIDTH).hasArg()
+        .withDescription(
+            "Maximum disk bandwidth (MB/s) in integer to be consumed by "
+                + "diskBalancer. e.g. 10 MB/s.")
+        .create();
+    getPlanOptions().addOption(bandwidth);
+    opt.addOption(bandwidth);
+
+    Option threshold = OptionBuilder.withLongOpt(THRESHOLD)
+        .hasArg()
+        .withDescription("Percentage of data skew that is tolerated before"
+            + " disk balancer starts working. For example, if"
+            + " total data on a 2 disk node is 100 GB then disk"
+            + " balancer calculates the expected value on each disk,"
+            + " which is 50 GB. If the tolerance is 10% then data"
+            + " on a single disk needs to be more than 60 GB"
+            + " (50 GB + 10% tolerance value) for Disk balancer to"
+            + " balance the disks.")
+        .create();
+    getPlanOptions().addOption(threshold);
+    opt.addOption(threshold);
+
+
+    Option maxError = OptionBuilder.withLongOpt(MAXERROR)
+        .hasArg()
+        .withDescription("Describes how many errors " +
+            "can be tolerated while copying between a pair of disks.")
+        .create();
+    getPlanOptions().addOption(maxError);
+    opt.addOption(maxError);
+
+    Option verbose = OptionBuilder.withLongOpt(VERBOSE)
+        .withDescription("Print out the summary of the plan on console")
+        .create();
+    getPlanOptions().addOption(verbose);
+    opt.addOption(verbose);
+  }
+
+  /**
+   * Adds Help to the options.
+   */
+  private void addHelpCommands(Options opt) {
+    Option help = OptionBuilder.withLongOpt(HELP)
+        .hasOptionalArg()
+        .withDescription("valid commands are plan | execute | query | cancel" +
+            " | report")
+        .create();
+    getHelpOptions().addOption(help);
+    opt.addOption(help);
+  }
+
+  /**
+   * Adds execute command options.
+   *
+   * @param opt Options
+   */
+  private void addExecuteCommands(Options opt) {
+    Option execute = OptionBuilder.withLongOpt(EXECUTE)
+        .hasArg()
+        .withDescription("Takes a plan file and " +
+            "submits it for execution by the datanode.")
+        .create();
+    getExecuteOptions().addOption(execute);
+    opt.addOption(execute);
+  }
+
+  /**
+   * Adds query command options.
+   *
+   * @param opt Options
+   */
+  private void addQueryCommands(Options opt) {
+    Option query = OptionBuilder.withLongOpt(QUERY)
+        .hasArg()
+        .withDescription("Queries the disk balancer " +
+            "status of a given datanode.")
+        .create();
+    getQueryOptions().addOption(query);
+    opt.addOption(query);
+
+    // Please note: Adding this only to Query options since -v is already
+    // added to global table.
+    Option verbose = OptionBuilder.withLongOpt(VERBOSE)
+        .withDescription("Prints details of the plan that is being executed " +
+            "on the node.")
+        .create();
+    getQueryOptions().addOption(verbose);
+  }
+
+  /**
+   * Adds cancel command options.
+   *
+   * @param opt Options
+   */
+  private void addCancelCommands(Options opt) {
+    Option cancel = OptionBuilder.withLongOpt(CANCEL)
+        .hasArg()
+        .withDescription("Cancels a running plan using a plan file.")
+        .create();
+    getCancelOptions().addOption(cancel);
+    opt.addOption(cancel);
+
+    Option node = OptionBuilder.withLongOpt(NODE)
+        .hasArg()
+        .withDescription("Cancels a running plan using a plan ID and hostName")
+        .create();
+
+    getCancelOptions().addOption(node);
+    opt.addOption(node);
+  }
+
+  /**
+   * Adds report command options.
+   *
+   * @param opt Options
+   */
+  private void addReportCommands(Options opt) {
+    Option report = OptionBuilder.withLongOpt(REPORT)
+        .withDescription("List nodes that will benefit from running " +
+            "DiskBalancer.")
+        .create();
+    getReportOptions().addOption(report);
+    opt.addOption(report);
+
+    Option top = OptionBuilder.withLongOpt(TOP)
+        .hasArg()
+        .withDescription("specify the number of nodes to be listed which has" +
+            " data imbalance.")
+        .create();
+    getReportOptions().addOption(top);
+    opt.addOption(top);
+
+    Option node =  OptionBuilder.withLongOpt(NODE)
+        .hasArg()
+        .withDescription("Datanode address, " +
+            "it can be DataNodeID, IP or hostname.")
+        .create();
+    getReportOptions().addOption(node);
+    opt.addOption(node);
+  }
+
+  /**
+   * This function parses all command line arguments and returns the appropriate
+   * values.
+   *
+   * @param argv - Argv from main
+   * @return CommandLine
+   */
+  private CommandLine parseArgs(String[] argv, Options opts)
+      throws org.apache.commons.cli.ParseException {
+    BasicParser parser = new BasicParser();
+    return parser.parse(opts, argv);
+  }
+
+  /**
+   * Dispatches calls to the right command Handler classes.
+   *
+   * @param cmd  - CommandLine
+   * @param opts options of command line
+   * @param out  the output stream used for printing
+   */
+  private int dispatch(CommandLine cmd, Options opts, final PrintStream out)
+      throws Exception {
+    Command currentCommand = null;
+    if (cmd.hasOption(DiskBalancerCLI.PLAN)) {
+      currentCommand = new PlanCommand(getConf());
+    }
+
+    if (cmd.hasOption(DiskBalancerCLI.EXECUTE)) {
+      currentCommand = new ExecuteCommand(getConf());
+    }
+
+    if (cmd.hasOption(DiskBalancerCLI.QUERY)) {
+      currentCommand = new QueryCommand(getConf());
+    }
+
+    if (cmd.hasOption(DiskBalancerCLI.CANCEL)) {
+      currentCommand = new CancelCommand(getConf());
+    }
+
+    if (cmd.hasOption(DiskBalancerCLI.REPORT)) {
+      currentCommand = new ReportCommand(getConf(), out);
+    }
+
+    if (cmd.hasOption(DiskBalancerCLI.HELP)) {
+      currentCommand = new HelpCommand(getConf());
+    }
+
+    // Invoke main help here.
+    if (currentCommand == null) {
+      new HelpCommand(getConf()).execute(null);
+      return 1;
+    }
+
+    currentCommand.execute(cmd);
+    return 0;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index 7d659af..451ca04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -41,18 +41,19 @@ import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.collect.Lists;
 
-import static org.apache.hadoop.hdfs.tools.DiskBalancer.CANCEL;
-import static org.apache.hadoop.hdfs.tools.DiskBalancer.HELP;
-import static org.apache.hadoop.hdfs.tools.DiskBalancer.NODE;
-import static org.apache.hadoop.hdfs.tools.DiskBalancer.PLAN;
-import static org.apache.hadoop.hdfs.tools.DiskBalancer.QUERY;
-import static org.apache.hadoop.hdfs.tools.DiskBalancer.REPORT;
+import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.CANCEL;
+import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.HELP;
+import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.NODE;
+import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.PLAN;
+import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.QUERY;
+import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.REPORT;
 
 import org.junit.Rule;
 import org.junit.rules.ExpectedException;
@@ -387,8 +388,7 @@ public class TestDiskBalancerCommand {
   private List<String> runCommandInternal(final String cmdLine) throws
       Exception {
     String[] cmds = StringUtils.split(cmdLine, ' ');
-    org.apache.hadoop.hdfs.tools.DiskBalancer db =
-        new org.apache.hadoop.hdfs.tools.DiskBalancer(conf);
+    DiskBalancerCLI db = new DiskBalancerCLI(conf);
 
     ByteArrayOutputStream bufOut = new ByteArrayOutputStream();
     PrintStream out = new PrintStream(bufOut);

