Repository: hadoop
Updated Branches:
  refs/heads/trunk 6a6e74acf -> 730bc746f


http://git-wip-us.apache.org/repos/asf/hadoop/blob/730bc746/hadoop-tools/hadoop-gridmix/src/main/shellprofile.d/hadoop-gridmix.sh
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/main/shellprofile.d/hadoop-gridmix.sh b/hadoop-tools/hadoop-gridmix/src/main/shellprofile.d/hadoop-gridmix.sh
new file mode 100755
index 0000000..b7887ba
--- /dev/null
+++ b/hadoop-tools/hadoop-gridmix/src/main/shellprofile.d/hadoop-gridmix.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if ! declare -f hadoop_subcommand_gridmix >/dev/null 2>/dev/null; then
+
+  if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
+    hadoop_add_subcommand "gridmix" "submit a mix of synthetic job, modeling a 
profiled from production load"
+  fi
+
+## @description  gridmix command for hadoop
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+function hadoop_subcommand_gridmix
+{
+  # shellcheck disable=SC2034
+  HADOOP_CLASSNAME=org.apache.hadoop.mapred.gridmix.Gridmix
+  hadoop_add_to_classpath_tools hadoop-rumen
+  hadoop_add_to_classpath_tools hadoop-gridmix
+}
+
+fi
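
The `declare -f` guard above makes the definition overridable: if a
`hadoop_subcommand_gridmix` function already exists when the profile is
sourced, the stock one is skipped. A minimal sketch of such an override,
in which the extra classpath entry is purely hypothetical:

```bash
# Hypothetical override; if defined before the profile is sourced,
# the guard above leaves this version in place.
function hadoop_subcommand_gridmix
{
  # shellcheck disable=SC2034
  HADOOP_CLASSNAME=org.apache.hadoop.mapred.gridmix.Gridmix
  hadoop_add_to_classpath_tools hadoop-rumen
  hadoop_add_to_classpath_tools hadoop-gridmix
  hadoop_add_classpath "/opt/site/gridmix-ext/*"  # site-local jars (made up)
}
```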

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730bc746/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh b/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh
new file mode 100755
index 0000000..d7d4022
--- /dev/null
+++ b/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if ! declare -f hadoop_subcommand_rumenfolder >/dev/null 2>/dev/null; then
+
+  if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
+    hadoop_add_subcommand "rumenfolder" "scale a rumen input trace"
+  fi
+
+## @description  rumenfolder command for hadoop
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+function hadoop_subcommand_rumenfolder
+{
+  # shellcheck disable=SC2034
+  HADOOP_CLASSNAME=org.apache.hadoop.tools.rumen.Folder
+  hadoop_add_to_classpath_tools hadoop-rumen
+  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+}
+
+fi
+
+if ! declare -f hadoop_subcommand_rumentrace >/dev/null 2>/dev/null; then
+
+  if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
+    hadoop_add_subcommand "rumentrace" "convert logs into a rumen trace"
+  fi
+
+## @description  rumentrace command for hadoop
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+function hadoop_subcommand_rumentrace
+{
+  # shellcheck disable=SC2034
+  HADOOP_CLASSNAME=org.apache.hadoop.tools.rumen.TraceBuilder
+  hadoop_add_to_classpath_tools hadoop-rumen
+  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+}
+
+fi
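
Both new subcommands fold `HADOOP_CLIENT_OPTS` into `HADOOP_OPTS`, so JVM
tuning can be supplied per invocation; a sketch (the heap size is arbitrary,
the arguments are taken from the Folder examples in the Rumen docs below):

```bash
HADOOP_CLIENT_OPTS="-Xmx4g" hadoop rumenfolder \
  -output-duration 1h \
  -input-cycle 20m \
  file:///tmp/job-trace.json \
  file:///tmp/job-trace-1hr.json
```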

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730bc746/hadoop-tools/hadoop-rumen/src/site/markdown/Rumen.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/site/markdown/Rumen.md.vm b/hadoop-tools/hadoop-rumen/src/site/markdown/Rumen.md.vm
index bee976a..34dfd0b 100644
--- a/hadoop-tools/hadoop-rumen/src/site/markdown/Rumen.md.vm
+++ b/hadoop-tools/hadoop-rumen/src/site/markdown/Rumen.md.vm
@@ -50,8 +50,8 @@ but a simulation of the scheduler elects to run that task on a remote
 rack, the simulator requires a runtime its input cannot provide. 
 To fill in these gaps, Rumen performs a statistical analysis of the 
 digest to estimate the variables the trace doesn't supply. Rumen traces 
-drive both Gridmix (a benchmark of Hadoop MapReduce clusters) and Mumak 
-(a simulator for the JobTracker).
+drive both Gridmix (a benchmark of Hadoop MapReduce clusters) and SLS
+(a simulator for the resource manager scheduler).
 
 
 $H3 Motivation
@@ -126,16 +126,13 @@ can use the `Folder` utility to fold the current trace to the
 desired length. The remaining part of this section explains these 
 utilities in detail.
     
-Examples in this section assumes that certain libraries are present 
-in the java CLASSPATH. See [Dependencies](#Dependencies) for more details.
-
 
 $H3 Trace Builder
       
 $H4 Command
 
 ```
-java org.apache.hadoop.tools.rumen.TraceBuilder [options] <jobtrace-output> <topology-output> <inputs>
+hadoop rumentrace [options] <jobtrace-output> <topology-output> <inputs>
 ```
   
 This command invokes the `TraceBuilder` utility of *Rumen*.
@@ -205,12 +202,8 @@ $H4 Options
 
 $H4 Example
 
-*Rumen* expects certain library *JARs* to be present in  the *CLASSPATH*.
-One simple way to run Rumen is to use
-`$HADOOP_HOME/bin/hadoop jar` command to run it as example below.
-
 ```
-java org.apache.hadoop.tools.rumen.TraceBuilder \
+hadoop rumentrace \
   file:///tmp/job-trace.json \
   file:///tmp/job-topology.json \
   hdfs:///tmp/hadoop-yarn/staging/history/done_intermediate/testuser
@@ -229,7 +222,7 @@ $H3 Folder
 $H4 Command
 
 ```
-java org.apache.hadoop.tools.rumen.Folder [options] [input] [output]
+hadoop rumenfolder [options] [input] [output]
 ```
       
 This command invokes the `Folder` utility of 
@@ -350,7 +343,7 @@ $H4 Examples
 $H5 Folding an input trace with 10 hours of total runtime to generate an output trace with 1 hour of total runtime
 
 ```
-java org.apache.hadoop.tools.rumen.Folder \
+hadoop rumenfolder \
   -output-duration 1h \
   -input-cycle 20m \
   file:///tmp/job-trace.json \
@@ -362,7 +355,7 @@ If the folded jobs are out of order then the command will bail out.
 $H5 Folding an input trace with 10 hours of total runtime to generate an output trace with 1 hour of total runtime and tolerate some skewness
 
 ```
-java org.apache.hadoop.tools.rumen.Folder \
+hadoop rumenfolder \
   -output-duration 1h \
   -input-cycle 20m \
   -allow-missorting \
@@ -378,7 +371,7 @@ If the folded jobs are out of order, then atmost
 $H5 Folding an input trace with 10 hours of total runtime to generate an output trace with 1 hour of total runtime in debug mode
 
 ```
-java org.apache.hadoop.tools.rumen.Folder \
+hadoop rumenfolder \
   -output-duration 1h \
   -input-cycle 20m \
   -debug -temp-directory file:///tmp/debug \
@@ -395,7 +388,7 @@ up.
 $H5 Folding an input trace with 10 hours of total runtime to generate an output trace with 1 hour of total runtime with custom concentration.
 
 ```
-java org.apache.hadoop.tools.rumen.Folder \
+hadoop rumenfolder \
   -output-duration 1h \
   -input-cycle 20m \
   -concentration 2 \
@@ -421,18 +414,3 @@ Look at the MapReduce
 <a href="https://issues.apache.org/jira/browse/MAPREDUCE/component/12313617">rumen-component</a>
 for further details.
 
-
-$H3 Dependencies
-
-*Rumen* expects certain library *JARs* to be present in  the *CLASSPATH*.
-One simple way to run Rumen is to use
-`hadoop jar` command to run it as example below.
-
-```
-$HADOOP_HOME/bin/hadoop jar \
-  $HADOOP_HOME/share/hadoop/tools/lib/hadoop-rumen-2.5.1.jar \
-  org.apache.hadoop.tools.rumen.TraceBuilder \
-  file:///tmp/job-trace.json \
-  file:///tmp/job-topology.json \
-  hdfs:///tmp/hadoop-yarn/staging/history/done_intermediate/testuser
-```
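
For reference, the removed Dependencies example maps directly onto the new
subcommand, which wires up the tools classpath itself via
`hadoop_add_to_classpath_tools`:

```bash
hadoop rumentrace \
  file:///tmp/job-trace.json \
  file:///tmp/job-topology.json \
  hdfs:///tmp/hadoop-yarn/staging/history/done_intermediate/testuser
```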

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730bc746/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/DumpTypedBytes.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/DumpTypedBytes.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/DumpTypedBytes.java
index 5a07cc3..ffddc7c 100644
--- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/DumpTypedBytes.java
+++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/DumpTypedBytes.java
@@ -91,8 +91,7 @@ public class DumpTypedBytes implements Tool {
   }
 
   private void printUsage() {
-    System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar 
hadoop-streaming.jar"
-        + " dumptb <glob-pattern>");
+    System.out.println("Usage: mapred streaming dumptb <glob-pattern>");
     System.out.println("  Dumps all files that match the given pattern to " +
         "standard output as typed bytes.");
     System.out.println("  The files can be text or sequence files");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730bc746/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/HadoopStreaming.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/HadoopStreaming.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/HadoopStreaming.java
index eabf46c..92f9d03 100644
--- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/HadoopStreaming.java
+++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/HadoopStreaming.java
@@ -56,8 +56,7 @@ public class HadoopStreaming {
   }
   
   private static void printUsage() {
-    System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar 
hadoop-streaming.jar"
-        + " [options]");
+    System.out.println("Usage: mapred streaming [options]");
     System.out.println("Options:");
     System.out.println("  dumptb <glob-pattern> Dumps all files that match 
the" 
         + " given pattern to ");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730bc746/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java
index a7a001c..838cfa1 100644
--- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java
+++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java
@@ -89,8 +89,7 @@ public class LoadTypedBytes implements Tool {
   }
 
   private void printUsage() {
-    System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar 
hadoop-streaming.jar"
-        + " loadtb <path>");
+    System.out.println("Usage: mapred streaming loadtb <path>");
     System.out.println("  Reads typed bytes from standard input" +
     " and stores them in a sequence file in");
     System.out.println("  the specified path");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730bc746/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh b/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh
new file mode 100755
index 0000000..cca016d
--- /dev/null
+++ b/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if ! declare -f mapred_subcommand_streaming >/dev/null 2>/dev/null; then
+
+  if [[ "${HADOOP_SHELL_EXECNAME}" = mapred ]]; then
+    hadoop_add_subcommand "streaming" "launch a mapreduce streaming job"
+  fi
+
+## @description  streaming command for mapred
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+function mapred_subcommand_streaming
+{
+  declare jarname
+  declare oldifs
+
+  # shellcheck disable=SC2034
+  HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar
+  hadoop_add_to_classpath_tools hadoop-streaming
+
+  # locate the streaming jar so we have something to
+  # give to RunJar
+  oldifs=${IFS}
+  IFS=:
+  for jarname in ${CLASSPATH}; do
+    if [[ "${jarname}" =~ hadoop-streaming-[0-9] ]]; then
+      HADOOP_SUBCMD_ARGS=("${jarname}" "${HADOOP_SUBCMD_ARGS[@]}")
+      break
+    fi
+  done
+
+  IFS=${oldifs}
+
+  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+
+}
+
+fi
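
The jar-locating loop above temporarily splits `CLASSPATH` on `:` and
prepends the first `hadoop-streaming-<version>` jar it finds to the
subcommand arguments, so `RunJar` receives a jar path as its first argument.
A self-contained sketch of that scan with an invented CLASSPATH:

```bash
#!/usr/bin/env bash
# Standalone illustration of the scan; the CLASSPATH value is made up.
CLASSPATH="/opt/hadoop/lib/guava.jar:/opt/hadoop/tools/lib/hadoop-streaming-3.0.0.jar"
oldifs=${IFS}
IFS=:
for jarname in ${CLASSPATH}; do
  if [[ "${jarname}" =~ hadoop-streaming-[0-9] ]]; then
    echo "streaming jar: ${jarname}"
    break
  fi
done
IFS=${oldifs}
```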

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730bc746/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
index cc8ed69..072a68b 100644
--- a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
+++ b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
@@ -62,7 +62,7 @@ Hadoop Streaming
 
 Hadoop streaming is a utility that comes with the Hadoop distribution. The utility allows you to create and run Map/Reduce jobs with any executable or script as the mapper and/or the reducer. For example:
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
       -input myInputDirs \
       -output myOutputDir \
       -mapper /bin/cat \
@@ -88,7 +88,7 @@ Streaming supports streaming command options as well as [generic command options
 
 **Note:** Be sure to place the generic options before the streaming options, otherwise the command will fail. For an example, see [Making Archives Available to Tasks](#Making_Archives_Available_to_Tasks).
 
-    hadoop command [genericOptions] [streamingOptions]
+    mapred streaming [genericOptions] [streamingOptions]
 
 The Hadoop streaming command options are listed here:
 
@@ -115,7 +115,7 @@ $H3 Specifying a Java Class as the Mapper/Reducer
 
 You can supply a Java class as the mapper and/or the reducer.
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
       -input myInputDirs \
       -output myOutputDir \
       -inputformat org.apache.hadoop.mapred.KeyValueTextInputFormat \
@@ -128,7 +128,7 @@ $H3 Packaging Files With Job Submissions
 
 You can specify any executable as the mapper and/or the reducer. The executables do not need to pre-exist on the machines in the cluster; however, if they don't, you will need to use "-file" option to tell the framework to pack your executable files as a part of job submission. For example:
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
       -input myInputDirs \
       -output myOutputDir \
       -mapper myPythonScript.py \
@@ -139,7 +139,7 @@ The above example specifies a user defined Python executable as the mapper. The
 
 In addition to executable files, you can also package other auxiliary files (such as dictionaries, configuration files, etc) that may be used by the mapper and/or the reducer. For example:
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
       -input myInputDirs \
       -output myOutputDir \
       -mapper myPythonScript.py \
@@ -216,7 +216,7 @@ $H4 Specifying the Number of Reducers
 
 To specify the number of reducers, for example two, use:
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
       -D mapreduce.job.reduces=2 \
       -input myInputDirs \
       -output myOutputDir \
@@ -229,7 +222,7 @@ As noted earlier, when the Map/Reduce framework reads a line from the stdout of
 
 However, you can customize this default. You can specify a field separator other than the tab character (the default), and you can specify the nth (n \>= 1) character rather than the first character in a line (the default) as the separator between the key and value. For example:
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
       -D stream.map.output.field.separator=. \
       -D stream.num.map.output.key.fields=4 \
       -input myInputDirs \
@@ -279,7 +279,7 @@ User can specify a different symlink name for -archives using \#.
 
 In this example, the input.txt file has two lines specifying the names of the two files: cachedir.jar/cache.txt and cachedir.jar/cache2.txt. "cachedir.jar" is a symlink to the archived directory, which has the files "cache.txt" and "cache2.txt".
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
                     -archives 
'hdfs://hadoop-nn1.example.com/user/me/samples/cachefile/cachedir.jar' \
                     -D mapreduce.job.maps=1 \
                     -D mapreduce.job.reduces=1 \
@@ -325,7 +325,7 @@ $H3 Hadoop Partitioner Class
 
 Hadoop has a library class, [KeyFieldBasedPartitioner](../api/org/apache/hadoop/mapred/lib/KeyFieldBasedPartitioner.html), that is useful for many applications. This class allows the Map/Reduce framework to partition the map outputs based on certain key fields, not the whole keys. For example:
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
       -D stream.map.output.field.separator=. \
       -D stream.num.map.output.key.fields=4 \
       -D map.output.key.field.separator=. \
@@ -375,7 +375,7 @@ $H3 Hadoop Comparator Class
 
 Hadoop has a library class, [KeyFieldBasedComparator](../api/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.html), that is useful for many applications. This class provides a subset of features provided by the Unix/GNU Sort. For example:
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
+      -D mapreduce.job.output.key.comparator.class=org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedComparator \
       -D stream.map.output.field.separator=. \
       -D stream.num.map.output.key.fields=4 \
@@ -411,7 +411,7 @@ Hadoop has a library package called [Aggregate](../api/org/apache/hadoop/mapred/
 
 To use Aggregate, simply specify "-reducer aggregate":
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
       -input myInputDirs \
       -output myOutputDir \
       -mapper myAggregatorForKeyCount.py \
@@ -444,7 +444,7 @@ $H3 Hadoop Field Selection Class
 
 Hadoop has a library class, [FieldSelectionMapReduce](../api/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.html), that effectively allows you to process text data like the unix "cut" utility. The map function defined in the class treats each input key/value pair as a list of fields. You can specify the field separator (the default is the tab character). You can select an arbitrary list of fields as the map output key, and an arbitrary list of fields as the map output value. Similarly, the reduce function defined in the class treats each input key/value pair as a list of fields. You can select an arbitrary list of fields as the reduce output key, and an arbitrary list of fields as the reduce output value. For example:
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
       -D mapreduce.map.output.key.field.separator=. \
       -D mapreduce.partition.keypartitioner.options=-k1,2 \
       -D mapreduce.fieldsel.data.field.separator=. \
@@ -495,7 +495,7 @@ Using an alias will not work, but variable substitution is allowed as shown in t
     charlie 80
     dan     75
 
-    $ c2='cut -f2'; hadoop jar hadoop-streaming-${project.version}.jar \
+    $ c2='cut -f2'; mapred streaming \
       -D mapreduce.job.name='Experiment' \
       -input /user/me/samples/student_marks \
       -output /user/me/samples/student_out \
@@ -525,7 +525,7 @@ $H3 How do I specify multiple input directories?
 
 You can specify multiple input directories with multiple '-input' options:
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
       -input '/user/foo/dir1' -input '/user/foo/dir2' \
         (rest of the command)
 
@@ -541,7 +541,7 @@ $H3 How do I parse XML documents using streaming?
 
 You can use the record reader StreamXmlRecordReader to process XML documents.
 
-    hadoop jar hadoop-streaming-${project.version}.jar \
+    mapred streaming \
       -inputreader "StreamXmlRecord,begin=BEGIN_STRING,end=END_STRING" \
         (rest of the command)
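
As the updated note on option ordering says, generic options must precede
streaming options; a sketch combining both (queue name and paths are
illustrative):

```bash
mapred streaming \
  -D mapreduce.job.queuename=default \
  -input myInputDirs \
  -output myOutputDir \
  -mapper /bin/cat \
  -reducer /usr/bin/wc
```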
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730bc746/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index cac3bb6..878c95f 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -16,7 +16,12 @@
 # limitations under the License.
 
 MYNAME="${BASH_SOURCE-$0}"
+HADOOP_SHELL_EXECNAME="${MYNAME##*/}"
 
+## @description  build up the yarn command's usage text.
+## @audience     public
+## @stability    stable
+## @replaceable  no
 function hadoop_usage
 {
   hadoop_add_option "--buildpaths" "attempt to add class files from build tree"
@@ -46,9 +51,180 @@ function hadoop_usage
   hadoop_add_subcommand "timelineserver" "run the timeline server"
   hadoop_add_subcommand "top" "view cluster information"
   hadoop_add_subcommand "version" "print the version"
-  hadoop_generate_usage "${MYNAME}" true
+  hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
 }
 
+## @description  Default command handler for yarn command
+## @audience     public
+## @stability    stable
+## @replaceable  no
+## @param        CLI arguments
+function yarncmd_case
+{
+  subcmd=$1
+  shift
+
+  case ${subcmd} in
+    application|applicationattempt|container)
+      HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
+      set -- "${subcmd}" "$@"
+    ;;
+    classpath)
+      hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
+    ;;
+    cluster)
+      HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.ClusterCLI
+      hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS"
+      YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
+    ;;
+    daemonlog)
+      HADOOP_CLASSNAME=org.apache.hadoop.log.LogLevel
+      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
+    ;;
+    envvars)
+      echo "JAVA_HOME='${JAVA_HOME}'"
+      echo "HADOOP_YARN_HOME='${HADOOP_YARN_HOME}'"
+      echo "YARN_DIR='${YARN_DIR}'"
+      echo "YARN_LIB_JARS_DIR='${YARN_LIB_JARS_DIR}'"
+      echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
+      echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
+      echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
+      echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
+      exit 0
+    ;;
+    jar)
+      HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar
+      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
+    ;;
+    historyserver)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      echo "DEPRECATED: Use of this command to start the timeline server is 
deprecated." 1>&2
+      echo "Instead use the timelineserver command for it." 1>&2
+      echo "Starting the History Server anyway..." 1>&2
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+    ;;
+    logs)
+      HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.LogsCLI
+      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
+    ;;
+    node)
+      HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.NodeCLI
+      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
+    ;;
+    nodemanager)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
+      hadoop_debug "Append YARN_NODEMANAGER_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_NODEMANAGER_OPTS}"
+      # Backwards compatibility
+      if [[ -n "${YARN_NODEMANAGER_HEAPSIZE}" ]]; then
+        HADOOP_HEAPSIZE_MAX="${YARN_NODEMANAGER_HEAPSIZE}"
+      fi
+    ;;
+    proxyserver)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer'
+      hadoop_debug "Append YARN_PROXYSERVER_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_PROXYSERVER_OPTS}"
+      # Backwards compatibility
+      if [[ -n "${YARN_PROXYSERVER_HEAPSIZE}" ]]; then
+        # shellcheck disable=SC2034
+        HADOOP_HEAPSIZE_MAX="${YARN_PROXYSERVER_HEAPSIZE}"
+      fi
+    ;;
+    queue)
+      HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.QueueCLI
+      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
+    ;;
+    resourcemanager)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_RESOURCEMANAGER_OPTS}"
+      hadoop_debug "Append YARN_RESOURCEMANAGER_OPTS onto HADOOP_OPTS"
+      # Backwards compatibility
+      if [[ -n "${YARN_RESOURCEMANAGER_HEAPSIZE}" ]]; then
+        # shellcheck disable=SC2034
+        HADOOP_HEAPSIZE_MAX="${YARN_RESOURCEMANAGER_HEAPSIZE}"
+      fi
+    ;;
+    rmadmin)
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
+      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
+    ;;
+    scmadmin)
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.SCMAdmin'
+      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
+    ;;
+    sharedcachemanager)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.sharedcachemanager.SharedCacheManager'
+      hadoop_debug "Append YARN_SHAREDCACHEMANAGER_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_SHAREDCACHEMANAGER_OPTS}"
+    ;;
+    timelineserver)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+      hadoop_debug "Append YARN_TIMELINESERVER_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_TIMELINESERVER_OPTS}"
+      # Backwards compatibility
+      if [[ -n "${YARN_TIMELINESERVER_HEAPSIZE}" ]]; then
+        # shellcheck disable=SC2034
+        HADOOP_HEAPSIZE_MAX="${YARN_TIMELINESERVER_HEAPSIZE}"
+      fi
+    ;;
+    version)
+      HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
+      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
+    ;;
+    top)
+      doNotSetCols=0
+      doNotSetRows=0
+      for i in "$@"; do
+        if [[ $i == "-cols" ]]; then
+          doNotSetCols=1
+        fi
+        if [[ $i == "-rows" ]]; then
+          doNotSetRows=1
+        fi
+      done
+      if [ $doNotSetCols == 0 ] && [ -n "${TERM}" ]; then
+        cols=$(tput cols)
+        if [ -n "$cols" ]; then
+          args=( "$@" )
+          args=("${args[@]}" "-cols" "$cols")
+          set -- "${args[@]}"
+        fi
+      fi
+      if [ $doNotSetRows == 0 ] && [ -n "${TERM}" ]; then
+        rows=$(tput lines)
+        if [ -n "$rows" ]; then
+          args=( "$@" )
+          args=("${args[@]}" "-rows" "$rows")
+          set -- "${args[@]}"
+        fi
+      fi
+      HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.TopCLI
+      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
+    ;;
+    *)
+      HADOOP_CLASSNAME="${subcmd}"
+      if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
+        hadoop_exit_with_usage 1
+      fi
+    ;;
+  esac
+}
 
 # let's locate libexec...
 if [[ -n "${HADOOP_HOME}" ]]; then
@@ -74,195 +250,71 @@ if [[ $# = 0 ]]; then
 fi
 
 # get arguments
-COMMAND=$1
+HADOOP_SUBCMD=$1
 shift
 
-case "${COMMAND}" in
-  application|applicationattempt|container)
-    CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
-    hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
-    set -- "${COMMAND}" "$@"
-  ;;
-  classpath)
-    hadoop_do_classpath_subcommand CLASS "$@"
-  ;;
-  cluster)
-    CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI
-    hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS"
-    YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
-  ;;
-  daemonlog)
-    CLASS=org.apache.hadoop.log.LogLevel
-    hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
-  ;;
-  envvars)
-    echo "JAVA_HOME='${JAVA_HOME}'"
-    echo "HADOOP_YARN_HOME='${HADOOP_YARN_HOME}'"
-    echo "YARN_DIR='${YARN_DIR}'"
-    echo "YARN_LIB_JARS_DIR='${YARN_LIB_JARS_DIR}'"
-    echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
-    echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
-    echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
-    echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
-    exit 0
-  ;;
-  jar)
-    CLASS=org.apache.hadoop.util.RunJar
-    hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
-  ;;
-  historyserver)
-    supportdaemonization="true"
-    echo "DEPRECATED: Use of this command to start the timeline server is 
deprecated." 1>&2
-    echo "Instead use the timelineserver command for it." 1>&2
-    echo "Starting the History Server anyway..." 1>&2
-    CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
-  ;;
-  logs)
-    CLASS=org.apache.hadoop.yarn.client.cli.LogsCLI
-    hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
-  ;;
-  node)
-    CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI
-    hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
-  ;;
-  nodemanager)
-    supportdaemonization="true"
-    CLASS='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
-    hadoop_debug "Append YARN_NODEMANAGER_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_NODEMANAGER_OPTS}"
-    # Backwards compatibility
-    if [[ -n "${YARN_NODEMANAGER_HEAPSIZE}" ]]; then
-      HADOOP_HEAPSIZE_MAX="${YARN_NODEMANAGER_HEAPSIZE}"
-    fi
-  ;;
-  proxyserver)
-    supportdaemonization="true"
-    CLASS='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer'
-    hadoop_debug "Append YARN_PROXYSERVER_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_PROXYSERVER_OPTS}"
-    # Backwards compatibility
-    if [[ -n "${YARN_PROXYSERVER_HEAPSIZE}" ]]; then
-      HADOOP_HEAPSIZE_MAX="${YARN_PROXYSERVER_HEAPSIZE}"
-    fi
-  ;;
-  queue)
-    CLASS=org.apache.hadoop.yarn.client.cli.QueueCLI
-    hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
-  ;;
-  resourcemanager)
-    supportdaemonization="true"
-    CLASS='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_RESOURCEMANAGER_OPTS}"
-    hadoop_debug "Append YARN_RESOURCEMANAGER_OPTS onto HADOOP_OPTS"
-    # Backwards compatibility
-    if [[ -n "${YARN_RESOURCEMANAGER_HEAPSIZE}" ]]; then
-      HADOOP_HEAPSIZE_MAX="${YARN_RESOURCEMANAGER_HEAPSIZE}"
-    fi
-  ;;
-  rmadmin)
-    CLASS='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
-    hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
-  ;;
-  scmadmin)
-    CLASS='org.apache.hadoop.yarn.client.SCMAdmin'
-    hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
-  ;;
-  sharedcachemanager)
-    supportdaemonization="true"
-    CLASS='org.apache.hadoop.yarn.server.sharedcachemanager.SharedCacheManager'
-    hadoop_debug "Append YARN_SHAREDCACHEMANAGER_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_SHAREDCACHEMANAGER_OPTS}"
-  ;;
-  timelineserver)
-    supportdaemonization="true"
-    CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
-    hadoop_debug "Append YARN_TIMELINESERVER_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_TIMELINESERVER_OPTS}"
-    # Backwards compatibility
-    if [[ -n "${YARN_TIMELINESERVER_HEAPSIZE}" ]]; then
-      HADOOP_HEAPSIZE_MAX="${YARN_TIMELINESERVER_HEAPSIZE}"
-    fi
-  ;;
-  version)
-    CLASS=org.apache.hadoop.util.VersionInfo
-    hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
-  ;;
-  top)
-    doNotSetCols=0
-    doNotSetRows=0
-    for i in "$@"; do
-      if [[ $i == "-cols" ]]; then
-        doNotSetCols=1
-      fi
-      if [[ $i == "-rows" ]]; then
-        doNotSetRows=1
-      fi
-    done
-    if [ $doNotSetCols == 0 ] && [ -n "${TERM}" ]; then
-      cols=$(tput cols)
-      if [ -n "$cols" ]; then
-        args=( $@ )
-        args=("${args[@]}" "-cols" "$cols")
-        set -- "${args[@]}"
-      fi
-    fi
-    if [ $doNotSetRows == 0 ] && [ -n "${TERM}" ]; then
-      rows=$(tput lines)
-      if [ -n "$rows" ]; then
-        args=( $@ )
-        args=("${args[@]}" "-rows" "$rows")
-        set -- "${args[@]}"
-      fi
-    fi
-    CLASS=org.apache.hadoop.yarn.client.cli.TopCLI
-    hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
-  ;;
-  *)
-    CLASS="${COMMAND}"
-    if ! hadoop_validate_classname "${CLASS}"; then
-      hadoop_exit_with_usage 1
-    fi
-  ;;
-esac
+HADOOP_SUBCMD_ARGS=("$@")
+
+if declare -f yarn_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
+  hadoop_debug "Calling dynamically: yarn_subcommand_${HADOOP_SUBCMD} 
${HADOOP_SUBCMD_ARGS[*]}"
+  "yarn_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
+else
+  yarncmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
+fi
 
-hadoop_verify_user "${COMMAND}"
+hadoop_verify_user "${HADOOP_SUBCMD}"
 
 if [[ ${HADOOP_SLAVE_MODE} = true ]]; then
   hadoop_common_slave_mode_execute "${HADOOP_YARN_HOME}/bin/yarn" "${HADOOP_USER_PARAMS[@]}"
   exit $?
 fi
 
-daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
-daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
+if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
+  HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"
+  hadoop_verify_secure_prereq
+  hadoop_setup_secure_service
+  
priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
+  
priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.err"
+  
priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
+  
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
+  
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
+else
+  
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
+  
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
+fi
 
 if [[  "${HADOOP_DAEMON_MODE}" != "default" ]]; then
   # shellcheck disable=SC2034
   HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
-  HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log"
+  # shellcheck disable=SC2034
+  HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
 fi
 
 hadoop_finalize
 
-if [[ -n "${supportdaemonization}" ]]; then
-  if [[ -n "${secure_service}" ]]; then
-    hadoop_secure_daemon_handler "${HADOOP_DAEMON_MODE}" "${COMMAND}" \
-    "${CLASS}" "${daemon_pidfile}" "${daemon_outfile}" \
-    "${priv_pidfile}" "${priv_outfile}" "${priv_errfile}" "$@"
+if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = true ]]; then
+  if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
+    hadoop_secure_daemon_handler \
+      "${HADOOP_DAEMON_MODE}" \
+      "${HADOOP_SUBCMD}" \
+      "${HADOOP_CLASSNAME}" \
+      "${daemon_pidfile}" \
+      "${daemon_outfile}" \
+      "${priv_pidfile}" \
+      "${priv_outfile}" \
+      "${priv_errfile}" \
+      "${HADOOP_SUBCMD_ARGS[@]}"
   else
-    hadoop_daemon_handler "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}" \
-    "${daemon_pidfile}" "${daemon_outfile}" "$@"
+    hadoop_daemon_handler \
+      "${HADOOP_DAEMON_MODE}" \
+      "${HADOOP_SUBCMD}" \
+      "${HADOOP_CLASSNAME}" \
+      "${daemon_pidfile}" \
+      "${daemon_outfile}" \
+      "${HADOOP_SUBCMD_ARGS[@]}"
   fi
   exit $?
 else
-  hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"
+  # shellcheck disable=SC2086
+  hadoop_java_exec "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}" "${HADOOP_SUBCMD_ARGS[@]}"
 fi
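
The dispatch block above checks for a dynamically defined
`yarn_subcommand_<name>` function before falling back to `yarncmd_case`, so
a shell profile can add a yarn subcommand exactly as this commit does for
`hadoop` and `mapred`. A sketch following that pattern; the subcommand name
and class are invented:

```bash
if ! declare -f yarn_subcommand_sitereport >/dev/null 2>/dev/null; then

  if [[ "${HADOOP_SHELL_EXECNAME}" = yarn ]]; then
    hadoop_add_subcommand "sitereport" "print a (hypothetical) site report"
  fi

function yarn_subcommand_sitereport
{
  # shellcheck disable=SC2034
  HADOOP_CLASSNAME=com.example.yarn.SiteReport  # made-up class
  HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
}

fi
```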

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730bc746/hadoop-yarn-project/hadoop-yarn/shellprofile.d/hadoop-yarn.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/shellprofile.d/hadoop-yarn.sh b/hadoop-yarn-project/hadoop-yarn/shellprofile.d/hadoop-yarn.sh
new file mode 100644
index 0000000..4602293
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/shellprofile.d/hadoop-yarn.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+hadoop_add_profile yarn
+
+function _yarn_hadoop_classpath
+{
+  local i
+  #
+  # get all of the yarn jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    for i in yarn-api yarn-common yarn-mapreduce yarn-master-worker \
+             yarn-server/yarn-server-nodemanager \
+             yarn-server/yarn-server-common \
+             yarn-server/yarn-server-resourcemanager; do
+      hadoop_add_classpath "${HADOOP_YARN_HOME}/$i/target/classes"
+    done
+
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/test/classes"
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/tools"
+  fi
+
+  if [[ -d "${HADOOP_YARN_HOME}/${YARN_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}"
+  fi
+
+  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath  "${HADOOP_YARN_HOME}/${YARN_DIR}"'/*'
+}
+
+function _yarn_hadoop_finalize
+{
+  # Add YARN custom options to command line in case someone actually
+  # used these.
+  #
+  # Note that we are replacing ' ' with '\ ' so that when we exec
+  # stuff it works
+  #
+  local yld=$HADOOP_LOG_DIR
+  hadoop_translate_cygwin_path yld
+  hadoop_add_param HADOOP_OPTS yarn.log.dir "-Dyarn.log.dir=${yld}"
+  hadoop_add_param HADOOP_OPTS yarn.log.file "-Dyarn.log.file=${HADOOP_LOGFILE}"
+  local yhd=$HADOOP_YARN_HOME
+  hadoop_translate_cygwin_path yhd
+  hadoop_add_param HADOOP_OPTS yarn.home.dir "-Dyarn.home.dir=${yhd}"
+  hadoop_add_param HADOOP_OPTS yarn.root.logger "-Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+}
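
`hadoop_add_profile yarn` registers the profile so the framework calls
`_yarn_hadoop_classpath` and `_yarn_hadoop_finalize` at the matching phases;
a minimal custom profile following the same `_<profile>_hadoop_<phase>`
naming convention (the profile name and path are invented):

```bash
#!/usr/bin/env bash
# Hypothetical site profile, dropped into a shellprofile.d directory.
hadoop_add_profile example

function _example_hadoop_classpath
{
  hadoop_add_classpath "/opt/site/lib/*"  # made-up path
}

function _example_hadoop_finalize
{
  hadoop_add_param HADOOP_OPTS example.flag "-Dexample.flag=true"
}
```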

http://git-wip-us.apache.org/repos/asf/hadoop/blob/730bc746/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn.sh b/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn.sh
deleted file mode 100644
index 4aa20b1..0000000
--- a/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-hadoop_add_profile yarn
-
-function _yarn_hadoop_classpath
-{
-  local i
-  #
-  # get all of the yarn jars+config in the path
-  #
-  # developers
-  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
-    for i in yarn-api yarn-common yarn-mapreduce yarn-master-worker \
-             yarn-server/yarn-server-nodemanager \
-             yarn-server/yarn-server-common \
-             yarn-server/yarn-server-resourcemanager; do
-      hadoop_add_classpath "${HADOOP_YARN_HOME}/$i/target/classes"
-    done
-
-    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/test/classes"
-    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/tools"
-  fi
-
-  if [[ -d "${HADOOP_YARN_HOME}/${YARN_DIR}/webapps" ]]; then
-    hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}"
-  fi
-
-  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}"'/*'
-  hadoop_add_classpath  "${HADOOP_YARN_HOME}/${YARN_DIR}"'/*'
-}
-
-function _yarn_hadoop_finalize
-{
-  # Add YARN custom options to comamnd line in case someone actaully
-  # used these.
-  #
-  # Note that we are replacing ' ' with '\ ' so that when we exec
-  # stuff it works
-  #
-  local yld=$HADOOP_LOG_DIR
-  hadoop_translate_cygwin_path yld
-  hadoop_add_param HADOOP_OPTS yarn.log.dir "-Dyarn.log.dir=${yld}"
-  hadoop_add_param HADOOP_OPTS yarn.log.file "-Dyarn.log.file=${HADOOP_LOGFILE}"
-  local yhd=$HADOOP_YARN_HOME
-  hadoop_translate_cygwin_path yhd
-  hadoop_add_param HADOOP_OPTS yarn.home.dir "-Dyarn.home.dir=${yhd}"
-  hadoop_add_param HADOOP_OPTS yarn.root.logger "-Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-}

