ACCUMULO-4490 Simplify Accumulo scripts and config

* Created accumulo-service & accumulo-cluster commands
* Updated accumulo command and improved usage
* Native libraries are now built using 'accumulo build-native'
* Accumulo config is now created using 'accumulo create-config'
* The release only contains one set of example config in 'conf/examples'
* Updated INSTALL.md to reflect script and config changes
* Accumulo falls back to conf/examples when configuring logging
* Combined several cluster-related scripts into cluster.sh
* Combined service/daemon-related scripts into service.sh
* Deleted config scripts and created load-env.sh
* Moved build-native-library code from script to accumulo command
* Reduced number of directories in tarball by placing scripts in lib and
  moving examples, test, and proxy into a new opt directory
* Updated script references in documentation
* Removed unnecessary classpath settings in core/src/test files
* Removed accumulo-site.xml in core/src/test that is not being used
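
A rough sketch of the new workflow introduced by this change (commands taken
from the bullets above and the INSTALL.md changes below; exact options may
differ):

    accumulo build-native            # build the optional native libraries
    accumulo create-config           # generate config files in conf/
    accumulo-cluster create-config   # create masters, tservers, etc. in conf/
    accumulo-cluster start           # start all services on the cluster
    accumulo-service tserver start   # or start a single service directly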


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/158cf16d
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/158cf16d
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/158cf16d

Branch: refs/heads/master
Commit: 158cf16d8e0d1b463293fb2a45941d74d5f83001
Parents: 24ba556
Author: Mike Walch <mwa...@apache.org>
Authored: Fri Sep 30 15:15:30 2016 -0400
Committer: Mike Walch <mwa...@apache.org>
Committed: Tue Nov 8 15:37:04 2016 -0500

----------------------------------------------------------------------
 INSTALL.md                                      | 127 +++---
 assemble/bin/accumulo                           | 331 +++++++--------
 assemble/bin/accumulo-cluster                   | 101 +++++
 assemble/bin/accumulo-service                   | 102 +++++
 assemble/conf/examples/accumulo-metrics.xml     |   2 +-
 assemble/libexec/accumulo-watcher.sh            | 141 +++++++
 assemble/libexec/bootstrap-hdfs.sh              |  90 ++++
 assemble/libexec/check-tservers                 | 199 +++++++++
 assemble/libexec/cluster.sh                     | 330 +++++++++++++++
 assemble/libexec/config.sh                      | 408 +++++++++++++++++++
 assemble/libexec/gen-monitor-cert.sh            |  84 ++++
 assemble/libexec/load-env.sh                    | 157 +++++++
 assemble/libexec/service.sh                     | 235 +++++++++++
 assemble/libexec/templates/accumulo-env.sh      |  90 ++++
 assemble/libexec/templates/accumulo-site.xml    | 176 ++++++++
 assemble/libexec/tool.sh                        |  92 +++++
 assemble/scripts/accumulo-watcher.sh            | 133 ------
 assemble/scripts/bootstrap-config.sh            | 407 ------------------
 assemble/scripts/bootstrap-hdfs.sh              |  90 ----
 assemble/scripts/build-native-library.sh        |  74 ----
 assemble/scripts/check-tservers                 | 199 ---------
 assemble/scripts/config-server.sh               |  85 ----
 assemble/scripts/config.sh                      | 172 --------
 .../scripts/generate-monitor-certificate.sh     |  84 ----
 assemble/scripts/log-forwarder.sh               |  50 ---
 assemble/scripts/start-all.sh                   |  77 ----
 assemble/scripts/start-daemon.sh                | 167 --------
 assemble/scripts/start-here.sh                  |  78 ----
 assemble/scripts/start-server.sh                |  57 ---
 assemble/scripts/stop-all.sh                    |  69 ----
 assemble/scripts/stop-here.sh                   |  53 ---
 assemble/scripts/stop-server.sh                 |  59 ---
 assemble/scripts/tdown.sh                       |  49 ---
 assemble/scripts/templates/accumulo-env.sh      |  89 ----
 assemble/scripts/templates/accumulo-site.xml    | 176 --------
 assemble/scripts/tool.sh                        |  92 -----
 assemble/scripts/tup.sh                         |  45 --
 assemble/src/main/assemblies/component.xml      |  29 +-
 .../main/scripts/generate-example-configs.sh    |   8 +-
 .../org/apache/accumulo/core/Constants.java     |   3 +-
 .../core/client/ClientConfiguration.java        |   9 +-
 .../accumulo/core/file/rfile/PrintInfo.java     |   5 +
 .../apache/accumulo/core/util/Classpath.java    |   5 +
 .../apache/accumulo/core/util/CreateToken.java  |   5 +
 .../org/apache/accumulo/core/util/Help.java     |   5 +
 .../java/org/apache/accumulo/core/util/Jar.java |  10 +
 .../org/apache/accumulo/core/util/Version.java  |   5 +
 .../test/resources/crypto-off-accumulo-site.xml |  22 -
 .../test/resources/crypto-on-accumulo-site.xml  |  22 -
 ...rypto-on-no-key-encryption-accumulo-site.xml |  22 -
 .../resources/disabled/conf/accumulo-site.xml   | 118 ------
 .../main/asciidoc/chapters/administration.txt   | 118 +++---
 docs/src/main/asciidoc/chapters/clients.txt     |  18 +-
 .../asciidoc/chapters/development_clients.txt   |   2 +-
 .../main/asciidoc/chapters/iterator_design.txt  |  10 +-
 docs/src/main/asciidoc/chapters/kerberos.txt    |   6 +-
 docs/src/main/asciidoc/chapters/shell.txt       |   2 +-
 .../asciidoc/chapters/table_configuration.txt   |   5 +-
 .../main/asciidoc/chapters/troubleshooting.txt  |  57 ++-
 docs/src/main/resources/administration.html     |  50 ++-
 docs/src/main/resources/examples/README         |   4 +-
 .../main/resources/examples/README.bulkIngest   |   2 +-
 .../main/resources/examples/README.classpath    |   2 +-
 .../src/main/resources/examples/README.filedata |   4 +-
 docs/src/main/resources/examples/README.mapred  |   8 +-
 docs/src/main/resources/examples/README.regex   |   2 +-
 docs/src/main/resources/examples/README.rowhash |   2 +-
 docs/src/main/resources/examples/README.shard   |   1 -
 .../main/resources/examples/README.tabletofile  |   2 +-
 .../src/main/resources/examples/README.terasort |   2 +-
 .../minicluster/impl/MiniClusterExecutable.java |   5 +
 .../StandaloneClusterControlTest.java           |   2 +-
 proxy/README                                    |   2 +-
 .../java/org/apache/accumulo/proxy/Proxy.java   |   5 +
 .../org/apache/accumulo/server/Accumulo.java    |  11 +-
 .../accumulo/server/conf/ConfigSanityCheck.java |   5 +
 .../apache/accumulo/server/init/Initialize.java |   5 +
 .../server/metrics/MetricsConfiguration.java    |   7 +-
 .../org/apache/accumulo/server/util/Admin.java  |   5 +
 .../org/apache/accumulo/server/util/Info.java   |   5 +
 .../accumulo/server/util/LoginProperties.java   |   5 +
 .../accumulo/server/util/ZooKeeperMain.java     |   5 +
 .../apache/accumulo/server/AccumuloTest.java    |   2 +-
 .../org/apache/accumulo/gc/GCExecutable.java    |   5 +
 .../accumulo/master/MasterExecutable.java       |   5 +
 .../accumulo/monitor/MonitorExecutable.java     |   5 +
 .../accumulo/tracer/TracerExecutable.java       |   5 +
 .../org/apache/accumulo/tserver/NativeMap.java  |   8 +-
 .../accumulo/tserver/TServerExecutable.java     |   5 +
 .../java/org/apache/accumulo/shell/Shell.java   |   5 +
 .../java/org/apache/accumulo/start/Main.java    |  39 +-
 .../start/classloader/AccumuloClassLoader.java  |   3 -
 .../classloader/vfs/AccumuloVFSClassLoader.java |   2 +-
 .../accumulo/start/spi/KeywordExecutable.java   |  16 +-
 .../accumulo/test/start/KeywordStartIT.java     |   5 +
 test/system/continuous/master-agitator.pl       |   4 +-
 test/system/continuous/run-moru.sh              |   2 +-
 test/system/continuous/run-verify.sh            |   2 +-
 test/system/merkle-replication/README           |   2 +-
 test/system/scalability/run.py                  |   6 +-
 test/system/upgrade_test.sh                     |   8 +-
 101 files changed, 2824 insertions(+), 2902 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/158cf16d/INSTALL.md
----------------------------------------------------------------------
diff --git a/INSTALL.md b/INSTALL.md
index 449152e..02ad3ac 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -15,8 +15,7 @@ See the License for the specific language governing 
permissions and
 limitations under the License.
 -->
 
-Installing Accumulo
-===================
+# Installing Accumulo
 
 This document covers installing Accumulo on single and multi-node environments.
 Either [download][1] or [build][2] a binary distribution of Accumulo from
@@ -26,33 +25,41 @@ source code.  Unpack as follows.
     tar xzf <some dir>/accumulo-X.Y.Z-bin.tar.gz
     cd accumulo-X.Y.Z
 
+There are three scripts in the `bin/` directory that are used to manage Accumulo:
+
+1. `accumulo` - Runs Accumulo command-line tools and starts Accumulo processes
+2. `accumulo-service` - Runs Accumulo processes as services
+3. `accumulo-cluster` - Manages an Accumulo cluster on a single node or several nodes
+
+These scripts will be used in the remaining instructions to configure and run 
Accumulo.
+For convenience, consider adding `accumulo-X.Y.Z/bin/` to your shell's path.
+
+## Configuring
+
 Accumulo has some optional native code that improves its performance and
-stability.  Before configuring Accumulo attempt to build this native code
+stability. Before configuring Accumulo, attempt to build this native code
 with the following command.
 
-    ./bin/build_native_library.sh
-
-If the command fails, its ok to continue with setup and resolve the issue
-later.
+    accumulo build-native
 
+If the command fails, it's OK to continue with setup and resolve the issue later.
 
-Configuring
------------
+Run the command below to create configuration for Accumulo in `conf/`:
 
-The Accumulo conf directory needs to be populated with initial config files.
-The following script is provided to assist with this.  Run the script and
-answer the questions.  When the script ask about memory-map type, choose Native
-if the build native script was successful.  Otherwise choose Java.
+    accumulo create-config
 
-    ./bin/bootstrap_config.sh
+The script will ask you questions about your setup. Below are some suggestions:
 
-The script will prompt for memory usage.   Please note that the footprints are
-only for the Accumulo system processes, so ample space should be left for other
-processes like hadoop, zookeeper, and the accumulo client code.  If Accumulo
-worker processes are swapped out and unresponsive, they may be killed.
+* When the script asks about memory-map type, choose Native if the native build
+  was successful. Otherwise, choose Java.
+* The script will prompt for memory usage. Please note that the footprints are
+  only for the Accumulo system processes, so ample space should be left for 
other
+  processes like Hadoop, Zookeeper, and the Accumulo client code.  If Accumulo
+  worker processes are swapped out and unresponsive, they may be killed.
 
-After this script runs, the conf directory should be populated and now a few
-edits are needed.
+After the `create-config` command is run, the `conf/` directory will contain
+`accumulo-env.sh`, `accumulo-site.xml`, and a few additional files. These files require
+a few edits before starting Accumulo.
 
 ### Secret
 
@@ -83,13 +90,12 @@ classpath` to print out info about where Accumulo is 
finding jars.  If the
 settings mentioned above are correct, then inspect `general.classpaths` in
 `conf/accumulo-site.xml`.
 
-Initialization
---------------
+## Initialization
 
 Accumulo needs to initialize the locations where it stores data in Zookeeper
 and HDFS.  The following command will do this.
 
-    ./bin/accumulo init
+    accumulo init
 
 The initialization command will prompt for the following information.
 
@@ -99,13 +105,43 @@ The initialization command will prompt for the following 
information.
    prompts for its password.  This information will be needed to later connect
    to Accumulo.
 
-Multiple Nodes
---------------
+## Run Accumulo
+
+There are several methods for running Accumulo:
+
+1. Run individual Accumulo services using `accumulo-service`. Useful if you are
+   using a cluster management tool (e.g., Ansible, Salt) or init.d scripts to
+   start Accumulo.
+
+2. Run an Accumulo cluster on one or more nodes using `accumulo-cluster` (which
+   uses `accumulo-service` to run services). Useful for local development and
+   testing or if you are not using a cluster management tool in production.
+
+Each method above has instructions below.
 
-Skip this section if running Accumulo on a single node.  Accumulo has
-coordinating, monitoring, and worker processes that run on specified nodes in
-the cluster.  The following files should be populated with a newline separated
-list of node names.  Must change from localhost.
+### Run Accumulo services
+
+Start Accumulo services (tserver, master, monitor, etc.) using the command below:
+
+    accumulo-service tserver start
+
+### Run an Accumulo cluster
+
+Before using the `accumulo-cluster` script, additional configuration files need
+to be created. Use the command below to create them:
+
+    accumulo-cluster create-config
+
+This creates five files (`masters`, `gc`, `monitor`, `tservers`, & `tracers`)
+in the `conf/` directory that contain the node names where Accumulo services
+are run on your cluster. By default, all files are configured to `localhost`. 
If
+you are running a single-node Accumulo cluster, these files do not need to be
+changed and the next section should be skipped.
+
+#### Multi-node configuration
+
+If you are running an Accumulo cluster on multiple nodes, the following files
+should be configured with a newline-separated list of node names:
 
  * `conf/masters` : Accumulo primary coordinating process.  Must specify one
                     node.  Can specify a few for fault tolerance.
@@ -118,37 +154,36 @@ list of node names.  Must change from localhost.
 
 The Accumulo, Hadoop, and Zookeeper software should be present at the same
 location on every node.  Also the files in the `conf` directory must be copied
-to every node.  There are many ways to replicate the software and
-configuration, two possible tools that can help replicate software and/or
-config are [pdcp][5] and [prsync][6].
+to every node. There are many ways to replicate the software and configuration,
+two possible tools that can help replicate software and/or config are [pdcp][5]
+and [prsync][6].
 
-Starting Accumulo
------------------
+The `accumulo-cluster` script uses ssh to start processes on remote nodes. 
Before
+attempting to start Accumulo, [passwordless ssh][7] must be set up on the cluster.
 
-The Accumulo scripts use ssh to start processes on remote nodes.  Before
-attempting to start Accumulo, [passwordless ssh][7] must be setup on the
-cluster.
+#### Start cluster
 
 After configuring and initializing Accumulo, use the following command to start
-it.
+the cluster:
 
-    ./bin/start-all.sh
+    accumulo-cluster start
 
-First steps
------------
+## First steps
 
-Once the `start-all.sh` script completes, use the following command to run the
-Accumulo shell.
+Once you have started Accumulo, use the following command to run the Accumulo 
shell:
 
-    ./bin/accumulo shell -u root
+    accumulo shell -u root
 
 Use your web browser to connect the Accumulo monitor page on port 9995.
 
     http://<hostname in conf/monitor>:9995/
 
-When finished, use the following command to stop Accumulo.
+## Stopping Accumulo
+
+When finished, use the following commands to stop Accumulo:
 
-    ./bin/stop-all.sh
+* Stop Accumulo service: `accumulo-service tserver stop`
+* Stop Accumulo cluster: `accumulo-cluster stop`
 
 [1]: http://accumulo.apache.org/
 [2]: README.md#building-
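
Taken together, the INSTALL.md changes above amount to a single-node quickstart
roughly like the following (a sketch only; the instance name, password prompts,
and secret/site edits described above still apply):

    accumulo build-native            # optional; OK to continue if it fails
    accumulo create-config           # answer the prompts, then edit conf/ files
    accumulo init                    # set instance name and root password
    accumulo-cluster create-config   # masters, gc, monitor, tservers, tracers
    accumulo-cluster start
    accumulo shell -u root           # when done: accumulo-cluster stop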

http://git-wip-us.apache.org/repos/asf/accumulo/blob/158cf16d/assemble/bin/accumulo
----------------------------------------------------------------------
diff --git a/assemble/bin/accumulo b/assemble/bin/accumulo
index a0fc17b..1d0e704 100755
--- a/assemble/bin/accumulo
+++ b/assemble/bin/accumulo
@@ -15,173 +15,186 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Start: Resolve Script Directory
-SOURCE="${BASH_SOURCE[0]}"
-while [ -h "${SOURCE}" ]; do # resolve $SOURCE until the file is no longer a 
symlink
-   bin="$( cd -P "$( dirname "${SOURCE}" )" && pwd )"
-   SOURCE="$(readlink "${SOURCE}")"
-   [[ "${SOURCE}" != /* ]] && SOURCE="${bin}/${SOURCE}" # if $SOURCE was a 
relative symlink, we need to resolve it relative to the path where the symlink 
file was located
-done
-bin="$( cd -P "$( dirname "${SOURCE}" )" && pwd )"
-script=$( basename "${SOURCE}" )
-# Stop: Resolve Script Directory
+function build_native() {
+  native_tarball="$basedir/lib/accumulo-native.tar.gz"
+  final_native_target="$basedir/lib/native"
 
-. "${bin}"/config.sh
+  if [[ ! -f $native_tarball ]]; then
+      echo "Could not find native code artifact: ${native_tarball}";
+      exit 1
+  fi
 
-START_JAR="${ACCUMULO_HOME}/lib/accumulo-start.jar"
+  # Make the destination for the native library
+  mkdir -p "${final_native_target}" || exit 1
 
-#
-# Resolve a program to its installation directory
-#
-locationByProgram()
-{
-   RESULT=$( which "$1" )
-   if [[ "$?" != 0 && -z "${RESULT}" ]]; then
-      echo "Cannot find '$1' and '$2' is not set in 
$ACCUMULO_CONF_DIR/accumulo-env.sh"
+  # Make a directory for us to unpack the native source into
+  TMP_DIR=$(mktemp -d /tmp/accumulo-native.XXXX) || exit 1
+
+  # Unpack the tarball to our temp directory
+  if ! tar xf "${native_tarball}" -C "${TMP_DIR}"
+  then
+      echo "Failed to unpack native tarball to ${TMP_DIR}"
       exit 1
-   fi
-   while [ -h "${RESULT}" ]; do # resolve $RESULT until the file is no longer 
a symlink
-      DIR="$( cd -P "$( dirname "$RESULT" )" && pwd )"
-      RESULT="$(readlink "${RESULT}")"
-      [[ "${RESULT}" != /* ]] && RESULT="${DIR}/${RESULT}" # if $RESULT was a 
relative symlink, we need to resolve it relative to the path where the symlink 
file was located
-   done
-   # find the relative home directory, accounting for an extra bin directory
-   RESULT=$(dirname "$(dirname "${RESULT}")")
-   echo "Auto-set ${2} to '${RESULT}'.  To suppress this message, set ${2} in 
conf/accumulo-env.sh"
-   eval "${2}=${RESULT}"
-}
+  fi
 
-test -z "${JAVA_HOME}"      && locationByProgram java JAVA_HOME
-test -z "${HADOOP_PREFIX}"  && locationByProgram hadoop HADOOP_PREFIX
-test -z "${ZOOKEEPER_HOME}" && locationByProgram zkCli.sh ZOOKEEPER_HOME
+  # Move to the first (only) directory in our unpacked tarball
+  native_dir=$(find "${TMP_DIR}" -maxdepth 1 -mindepth 1 -type d)
 
-DEFAULT_GENERAL_JAVA_OPTS=""
+  cd "${native_dir}" || exit 1
 
-#
-# ACCUMULO_XTRAJARS is where all of the commandline -add items go into for 
reading by accumulo.
-# It also holds the JAR run with the jar command and, if possible, any items 
in the JAR manifest's Class-Path.
-#
-if [ "$1" = "-add" ] ; then
-    export ACCUMULO_XTRAJARS="$2"
-    shift 2
-else
-    export ACCUMULO_XTRAJARS=""
-fi
-if [ "$1" = "jar" -a -f "$2" ] ; then
-    if [[ $2 =~ ^/ ]]; then
-      jardir="$(dirname "$2")"
-      jarfile="$2"
-    else
-      jardir="$(pwd)"
-      jarfile="${jardir}/${2}"
-    fi
-    if jar tf "$jarfile" | grep -q META-INF/MANIFEST.MF ; then
-      cp="$(unzip -p "$jarfile" META-INF/MANIFEST.MF | grep ^Class-Path: | sed 
's/^Class-Path: *//')"
-      if [[ -n "$cp" ]] ; then
-         for j in $cp; do
-            if [[ "$j" != "Class-Path:" ]] ; then
-               ACCUMULO_XTRAJARS="${jardir}/${j},$ACCUMULO_XTRAJARS"
-            fi
-         done
-      fi
-    fi
-    ACCUMULO_XTRAJARS="${jarfile},$ACCUMULO_XTRAJARS"
-fi
+  # Make the native library
+  export USERFLAGS="$@"
+  if ! make
+  then
+      echo "Make failed!"
+      exit 1
+  fi
 
-#
-# Set up -D switches for JAAS and Kerberos if env variables set
-#
-if [[ ! -z ${ACCUMULO_JAAS_CONF} ]]; then
-  ACCUMULO_GENERAL_OPTS="${ACCUMULO_GENERAL_OPTS} 
-Djava.security.auth.login.config=${ACCUMULO_JAAS_CONF}"
-fi
+  # "install" the artifact
+  cp libaccumulo.* "${final_native_target}" || exit 1
 
-if [[ ! -z ${ACCUMULO_KRB5_CONF} ]]; then
-  ACCUMULO_GENERAL_OPTS="${ACCUMULO_GENERAL_OPTS} 
-Djava.security.krb5.conf=${ACCUMULO_KRB5_CONF}"
-fi
+  # Clean up our temp directory
+  rm -rf "${TMP_DIR}"
 
-#
-# Add appropriate options for process type
-#
-case "$1" in
-master)  export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} 
${ACCUMULO_MASTER_OPTS}" ;;
-gc)      export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} ${ACCUMULO_GC_OPTS}" ;;
-tserver*) export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} 
${ACCUMULO_TSERVER_OPTS}" ;;
-monitor) export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} 
${ACCUMULO_MONITOR_OPTS}" ;;
-shell)   export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} 
${ACCUMULO_SHELL_OPTS}" ;;
-*)       export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} 
${ACCUMULO_OTHER_OPTS}" ;;
-esac
-
-XML_FILES="${ACCUMULO_CONF_DIR}"
-LOG4J_JAR=$(find -H "${HADOOP_PREFIX}/lib" 
"${HADOOP_PREFIX}"/share/hadoop/common/lib -name 'log4j*.jar' -print 
2>/dev/null | head -1)
-SLF4J_JARS="${ACCUMULO_HOME}/lib/slf4j-api.jar:${ACCUMULO_HOME}/lib/slf4j-log4j12.jar"
-
-# The `find` command could fail for environmental reasons or bad configuration
-# Avoid trying to run Accumulo when we can't find the jar
-if [ -z "${LOG4J_JAR}" -a -z "${CLASSPATH}" ]; then
-   echo "Could not locate Log4j jar in Hadoop installation at ${HADOOP_PREFIX}"
-   exit 1
-fi
-
-CLASSPATH="${XML_FILES}:${START_JAR}:${SLF4J_JARS}:${LOG4J_JAR}:${CLASSPATH}"
-
-if [ -z "${JAVA_HOME}" -o ! -d "${JAVA_HOME}" ]; then
-   echo "JAVA_HOME is not set or is not a directory.  Please make sure it's 
set globally or in conf/accumulo-env.sh"
-   exit 1
-fi
-if [ -z "${HADOOP_PREFIX}" -o ! -d "${HADOOP_PREFIX}" ]; then
-   echo "HADOOP_PREFIX is not set or is not a directory.  Please make sure 
it's set globally or in conf/accumulo-env.sh"
-   exit 1
-fi
-if [ -z "${ZOOKEEPER_HOME}" -o ! -d "${ZOOKEEPER_HOME}" ]; then
-   echo "ZOOKEEPER_HOME is not set or is not a directory.  Please make sure 
it's set globally or in conf/accumulo-env.sh"
-   exit 1
-fi
-
-# This is default for hadoop 2.x;
-#   for another distribution, specify (DY)LD_LIBRARY_PATH
-#   explicitly in ${ACCUMULO_HOME}/conf/accumulo-env.sh
-#   usually something like:
-#     ${HADOOP_PREFIX}/lib/native/${PLATFORM}
-if [ -e "${HADOOP_PREFIX}/lib/native/libhadoop.so" ]; then
-   LIB_PATH="${HADOOP_PREFIX}/lib/native"
-   LD_LIBRARY_PATH="${LIB_PATH}:${LD_LIBRARY_PATH}"     # For Linux
-   DYLD_LIBRARY_PATH="${LIB_PATH}:${DYLD_LIBRARY_PATH}" # For Mac
-fi
-
-# Export the variables just in case they are not exported
-# This makes them available to java
-export JAVA_HOME HADOOP_PREFIX ZOOKEEPER_HOME LD_LIBRARY_PATH DYLD_LIBRARY_PATH
-
-# Strip the instance from $1
-APP=$1
-# Avoid setting an instance unless it's necessary to ensure consistency in 
filenames
-INSTANCE=""
-# Avoid setting a pointless system property
-INSTANCE_OPT=""
-if [[ "$1" =~ ^tserver-[1-9][0-9]*$ ]]; then
-  APP="$(echo "$1" | cut -d'-' -f1)"
-  # Appending the trailing underscore to make single-tserver deploys look how 
they did
-  INSTANCE="$(echo "$1" | cut -d'-' -f2)_"
-
-  #Rewrite the input arguments
-  set -- "$APP" "${@:2}"
-
-  # The extra system property we'll pass to the java cmd
-  INSTANCE_OPT="-Daccumulo.service.instance=${INSTANCE}"
-fi
+  echo "Successfully installed native library"
+}
 
-#
-# app isn't used anywhere, but it makes the process easier to spot when 
ps/top/snmp truncate the command line
-JAVA="${JAVA_HOME}/bin/java"
-exec "$JAVA" "-Dapp=$1" \
-   $INSTANCE_OPT \
-   $ACCUMULO_OPTS \
-   -classpath "${CLASSPATH}" \
-   -XX:OnOutOfMemoryError="${ACCUMULO_KILL_CMD:-kill -9 %p}" \
-   -XX:-OmitStackTraceInFastThrow \
-   
-Djavax.xml.parsers.DocumentBuilderFactory=com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl
 \
-   -Dorg.apache.accumulo.core.home.dir="${ACCUMULO_HOME}" \
-   -Dhadoop.home.dir="${HADOOP_PREFIX}" \
-   -Dzookeeper.home.dir="${ZOOKEEPER_HOME}" \
-   org.apache.accumulo.start.Main \
-   "$@"
+function main() {
+
+  # Start: Resolve Script Directory
+  SOURCE="${BASH_SOURCE[0]}"
+  while [ -h "${SOURCE}" ]; do # resolve $SOURCE until the file is no longer a 
symlink
+     bin="$( cd -P "$( dirname "${SOURCE}" )" && pwd )"
+     SOURCE="$(readlink "${SOURCE}")"
+     [[ "${SOURCE}" != /* ]] && SOURCE="${bin}/${SOURCE}" # if $SOURCE was a 
relative symlink, we need to resolve it relative to the path where the symlink 
file was located
+  done
+  bin="$( cd -P "$( dirname "${SOURCE}" )" && pwd )"
+  basedir=$( cd -P "${bin}"/.. && pwd )
+  # Stop: Resolve Script Directory
+
+  if [[ "$1" == "create-config" ]]; then
+    "$basedir/libexec/config.sh" "${@:2}"
+    exit 0
+  elif [[ "$1" == "build-native" ]]; then
+    build_native ${*:2}
+    exit 0
+  fi
+
+  source "$basedir"/libexec/load-env.sh
+
+  # ACCUMULO_XTRAJARS is where all of the commandline -add items go into for 
reading by accumulo.
+  # It also holds the JAR run with the jar command and, if possible, any items 
in the JAR manifest's Class-Path.
+  if [[ "$1" = "-add" ]]; then
+      export ACCUMULO_XTRAJARS="$2"
+      shift 2
+  else
+      export ACCUMULO_XTRAJARS=""
+  fi
+  if [[ "$1" = "jar" && -f "$2" ]]; then
+      if [[ $2 =~ ^/ ]]; then
+        jardir="$(dirname "$2")"
+        jarfile="$2"
+      else
+        jardir="$(pwd)"
+        jarfile="${jardir}/${2}"
+      fi
+      if jar tf "$jarfile" | grep -q META-INF/MANIFEST.MF ; then
+        cp="$(unzip -p "$jarfile" META-INF/MANIFEST.MF | grep ^Class-Path: | 
sed 's/^Class-Path: *//')"
+        if [[ -n "$cp" ]] ; then
+           for j in $cp; do
+              if [[ "$j" != "Class-Path:" ]] ; then
+                 ACCUMULO_XTRAJARS="${jardir}/${j},$ACCUMULO_XTRAJARS"
+              fi
+           done
+        fi
+      fi
+      ACCUMULO_XTRAJARS="${jarfile},$ACCUMULO_XTRAJARS"
+  fi
+
+  # Set up -D switches for JAAS and Kerberos if files exist
+  if [[ -f ${ACCUMULO_JAAS_CONF} ]]; then
+    ACCUMULO_GENERAL_OPTS="${ACCUMULO_GENERAL_OPTS} 
-Djava.security.auth.login.config=${ACCUMULO_JAAS_CONF}"
+  fi
+  if [[ -f ${ACCUMULO_KRB5_CONF} ]]; then
+    ACCUMULO_GENERAL_OPTS="${ACCUMULO_GENERAL_OPTS} 
-Djava.security.krb5.conf=${ACCUMULO_KRB5_CONF}"
+  fi
+
+  # Add appropriate options for process type
+  case "$1" in
+  master)  export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} 
${ACCUMULO_MASTER_OPTS}" ;;
+  gc)      export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} ${ACCUMULO_GC_OPTS}" 
;;
+  tserver*) export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} 
${ACCUMULO_TSERVER_OPTS}" ;;
+  monitor) export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} 
${ACCUMULO_MONITOR_OPTS}" ;;
+  shell)   export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} 
${ACCUMULO_SHELL_OPTS}" ;;
+  *)       export ACCUMULO_OPTS="${ACCUMULO_GENERAL_OPTS} 
${ACCUMULO_OTHER_OPTS}" ;;
+  esac
+
+  LOG4J_JAR=$(find -H "${HADOOP_PREFIX}/lib" 
"${HADOOP_PREFIX}"/share/hadoop/common/lib -name 'log4j*.jar' -print 
2>/dev/null | head -1)
+  
SLF4J_JARS="${ACCUMULO_LIB_DIR}/slf4j-api.jar:${ACCUMULO_LIB_DIR}/slf4j-log4j12.jar"
+
+  # The `find` command could fail for environmental reasons or bad 
configuration
+  # Avoid trying to run Accumulo when we can't find the jar
+  if [[ -z "${LOG4J_JAR}" && -z "${CLASSPATH}" ]]; then
+     echo "Could not locate Log4j jar in Hadoop installation at 
${HADOOP_PREFIX}"
+     exit 1
+  fi
+
+  if [[ ! " gc master monitor tserver tracer " =~ " $1 " ]]; then
+    if [[ -f ${ACCUMULO_CONF_DIR}/log4j.properties ]]; then
+      export ACCUMULO_OPTS="${ACCUMULO_OPTS} 
-Dlog4j.configuration=file:${ACCUMULO_CONF_DIR}/log4j.properties"
+    else
+      export ACCUMULO_OPTS="${ACCUMULO_OPTS} 
-Dlog4j.configuration=file:${ACCUMULO_CONF_DIR}/examples/log4j.properties"
+    fi
+  fi
+
+  
CLASSPATH="${ACCUMULO_LIB_DIR}/accumulo-start.jar:${ACCUMULO_CONF_DIR}:${SLF4J_JARS}:${LOG4J_JAR}:${CLASSPATH}"
+
+  # This is default for hadoop 2.x;
+  #   for another distribution, specify (DY)LD_LIBRARY_PATH
+  #   explicitly in ${ACCUMULO_CONF_DIR}/accumulo-env.sh
+  #   usually something like:
+  #     ${HADOOP_PREFIX}/lib/native/${PLATFORM}
+  if [ -e "${HADOOP_PREFIX}/lib/native/libhadoop.so" ]; then
+     LIB_PATH="${HADOOP_PREFIX}/lib/native"
+     LD_LIBRARY_PATH="${LIB_PATH}:${LD_LIBRARY_PATH}"     # For Linux
+     DYLD_LIBRARY_PATH="${LIB_PATH}:${DYLD_LIBRARY_PATH}" # For Mac
+  fi
+
+  # Export the variables just in case they are not exported
+  # This makes them available to java
+  export JAVA_HOME HADOOP_PREFIX ZOOKEEPER_HOME LD_LIBRARY_PATH 
DYLD_LIBRARY_PATH
+
+  # Strip the instance from $1
+  APP=$1
+  # Avoid setting an instance unless it's necessary to ensure consistency in 
filenames
+  INSTANCE=""
+  # Avoid setting a pointless system property
+  INSTANCE_OPT=""
+  if [[ "$1" =~ ^tserver-[1-9][0-9]*$ ]]; then
+    APP="$(echo "$1" | cut -d'-' -f1)"
+    # Appending the trailing underscore to make single-tserver deploys look 
how they did
+    INSTANCE="$(echo "$1" | cut -d'-' -f2)_"
+
+    #Rewrite the input arguments
+    set -- "$APP" "${@:2}"
+
+    # The extra system property we'll pass to the java cmd
+    INSTANCE_OPT="-Daccumulo.service.instance=${INSTANCE}"
+  fi
+
+  # app isn't used anywhere, but it makes the process easier to spot when 
ps/top/snmp truncate the command line
+  JAVA="${JAVA_HOME}/bin/java"
+  exec "$JAVA" "-Dapp=$1" \
+     $INSTANCE_OPT \
+     "${ACCUMULO_OPTS_ARRAY[@]}" \
+     -classpath "${CLASSPATH}" \
+     -XX:OnOutOfMemoryError="${ACCUMULO_KILL_CMD:-kill -9 %p}" \
+     -XX:-OmitStackTraceInFastThrow \
+     
-Djavax.xml.parsers.DocumentBuilderFactory=com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl
 \
+     -Dhadoop.home.dir="${HADOOP_PREFIX}" \
+     -Dzookeeper.home.dir="${ZOOKEEPER_HOME}" \
+     org.apache.accumulo.start.Main \
+     "$@"
+}
+
+main "$@"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/158cf16d/assemble/bin/accumulo-cluster
----------------------------------------------------------------------
diff --git a/assemble/bin/accumulo-cluster b/assemble/bin/accumulo-cluster
new file mode 100755
index 0000000..f7f44ac
--- /dev/null
+++ b/assemble/bin/accumulo-cluster
@@ -0,0 +1,101 @@
+#! /usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function print_usage {
+  cat <<EOF
+Usage: accumulo-cluster <command> (<argument> ...)
+
+Commands:
+  create-config   Creates cluster config
+  start           Starts Accumulo cluster
+  stop            Stops Accumulo cluster
+
+EOF
+  exit 1
+}
+
+function invalid_args {
+  echo -e "Invalid arguments: $1\n"
+  print_usage
+  exit 1
+}
+
+function verify_dir {
+  if [[ ! -d $1 ]]; then
+    echo "Directory does not exist: $1"
+    exit 1
+  fi
+}
+
+function verify_file {
+  if [[ ! -f $1 ]]; then
+    echo "File does not exist: $1"
+    exit 1
+  fi
+}
+
+function copy_file {
+  verify_file "$1"
+  verify_dir "$2"
+  if ! cp "$1" "$2"
+  then
+    echo "Failed to copy file '$1' to '$2'"
+    exit 1
+  fi
+}
+
+function main() {
+
+  if [[ -z $1 ]]; then
+    invalid_args "<command> cannot be empty"
+  fi
+
+  # Resolve base directory
+  SOURCE="${BASH_SOURCE[0]}"
+  while [ -h "${SOURCE}" ]; do
+     bin="$( cd -P "$( dirname "${SOURCE}" )" && pwd )"
+     SOURCE="$(readlink "${SOURCE}")"
+     [[ "${SOURCE}" != /* ]] && SOURCE="${bin}/${SOURCE}"
+  done
+  bin="$( cd -P "$( dirname "${SOURCE}" )" && pwd )"
+  basedir=$( cd -P "${bin}"/.. && pwd )
+
+  source "$basedir"/libexec/load-env.sh
+
+  verify_file "${ACCUMULO_LIBEXEC_DIR}/cluster.sh"
+
+  case "$1" in
+    create-config)
+      copy_file "${ACCUMULO_CONF_DIR}/examples/gc" "${ACCUMULO_CONF_DIR}"
+      copy_file "${ACCUMULO_CONF_DIR}/examples/masters" "${ACCUMULO_CONF_DIR}"
+      copy_file "${ACCUMULO_CONF_DIR}/examples/monitor" "${ACCUMULO_CONF_DIR}"
+      copy_file "${ACCUMULO_CONF_DIR}/examples/tservers" "${ACCUMULO_CONF_DIR}"
+      copy_file "${ACCUMULO_CONF_DIR}/examples/tracers" "${ACCUMULO_CONF_DIR}"
+      ;;
+    start)
+      "${ACCUMULO_LIBEXEC_DIR}"/cluster.sh start-all
+      ;;
+    stop)
+      "${ACCUMULO_LIBEXEC_DIR}"/cluster.sh stop-all
+      ;;
+    *)
+      invalid_args "'$1' is an invalid <command>"
+      ;;
+  esac
+}
+
+main "$@"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/158cf16d/assemble/bin/accumulo-service
----------------------------------------------------------------------
diff --git a/assemble/bin/accumulo-service b/assemble/bin/accumulo-service
new file mode 100755
index 0000000..90a310e
--- /dev/null
+++ b/assemble/bin/accumulo-service
@@ -0,0 +1,102 @@
+#! /usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function print_usage {
+  cat <<EOF
+Usage: accumulo-service <service> <command>
+
+Services:
+  gc          Accumulo garbage collector
+  monitor     Accumulo monitor
+  master      Accumulo master
+  tserver     Accumulo tserver
+  tracer      Accumulo tracer
+
+Commands:
+  start       Start service
+  stop        Stop service
+  kill        Kill service
+
+EOF
+  exit 1
+}
+
+function invalid_args {
+  echo -e "Invalid arguments: $1\n"
+  print_usage
+  exit 1
+}
+
+function get_host {
+  host="$(hostname -s)"
+  if [[ -z "$host" ]]; then
+    netcmd=/sbin/ifconfig
+    [[ ! -x $netcmd ]] && netcmd='/bin/netstat -ie'
+    host=$($netcmd 2>/dev/null| grep "inet[^6]" | awk '{print $2}' | sed 
's/addr://' | grep -v 0.0.0.0 | grep -v 127.0.0.1 | head -n 1)
+    if [[ $? != 0 ]]; then
+      host=$(python -c 'import socket as s; print 
s.gethostbyname(s.getfqdn())')
+    fi
+  fi 
+  echo "$host"
+}
+
+function main() {
+
+  if [[ -z $1 ]]; then
+    invalid_args "<service> cannot be empty"
+  fi
+
+  # Resolve base directory
+  SOURCE="${BASH_SOURCE[0]}"
+  while [ -h "${SOURCE}" ]; do
+     bin="$( cd -P "$( dirname "${SOURCE}" )" && pwd )"
+     SOURCE="$(readlink "${SOURCE}")"
+     [[ "${SOURCE}" != /* ]] && SOURCE="${bin}/${SOURCE}"
+  done
+  bin="$( cd -P "$( dirname "${SOURCE}" )" && pwd )"
+  basedir=$( cd -P "${bin}"/.. && pwd )
+
+  source "$basedir"/libexec/load-env.sh
+
+  service=$1
+  case "$service" in
+    gc|master|monitor|tserver|tracer)
+      if [[ -z $2 ]]; then
+        invalid_args "<command> cannot be empty"
+      fi
+      case "$2" in 
+        start)
+          "${ACCUMULO_LIBEXEC_DIR}/service.sh" start "$(get_host)" "$1"
+          ;;
+        stop)
+          "${ACCUMULO_LIBEXEC_DIR}/service.sh" stop "$(get_host)" "$service" 
TERM
+          ;;
+        kill)
+          "${ACCUMULO_LIBEXEC_DIR}/service.sh" stop "$(get_host)" "$service" 
KILL
+          ;;
+        *)
+          invalid_args "'$2' is an invalid <command>"
+          ;;
+      esac
+      ;;
+    *)
+      invalid_args "'$1' is an invalid <service>"
+      ;;
+  esac
+}
+
+main "$@"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/158cf16d/assemble/conf/examples/accumulo-metrics.xml
----------------------------------------------------------------------
diff --git a/assemble/conf/examples/accumulo-metrics.xml 
b/assemble/conf/examples/accumulo-metrics.xml
index 3b97809..24228eb 100644
--- a/assemble/conf/examples/accumulo-metrics.xml
+++ b/assemble/conf/examples/accumulo-metrics.xml
@@ -23,7 +23,7 @@
    Metrics log directory
 -->
   <logging>
-    <dir>${ACCUMULO_HOME}/metrics</dir>
+    <dir>${ACCUMULO_LOG_DIR}/metrics</dir>
   </logging>
 <!--
  Enable/Disable metrics accumulation on the different servers and their 
components

http://git-wip-us.apache.org/repos/asf/accumulo/blob/158cf16d/assemble/libexec/accumulo-watcher.sh
----------------------------------------------------------------------
diff --git a/assemble/libexec/accumulo-watcher.sh 
b/assemble/libexec/accumulo-watcher.sh
new file mode 100755
index 0000000..bebe42e
--- /dev/null
+++ b/assemble/libexec/accumulo-watcher.sh
@@ -0,0 +1,141 @@
+#! /usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOGHOST=$1
+shift
+process=$1
+
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a 
symlink
+  libexec="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+  SOURCE="$(readlink "$SOURCE")"
+  [[ $SOURCE != /* ]] && SOURCE="$libexec/$SOURCE" # if $SOURCE was a relative 
symlink, we need to resolve it relative to the path where the symlink file was 
located
+done
+libexec="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+# Stop: Resolve Script Directory
+
+source "${libexec}"/load-env.sh
+
+# Setting for watcher
+UNEXPECTED_TIMESPAN=${UNEXPECTED_TIMESPAN:-3600}
+UNEXPECTED_RETRIES=${UNEXPECTED_RETRIES:-2}
+OOM_TIMESPAN=${OOM_TIMESPAN-3600}
+OOM_RETRIES=${OOM_RETRIES-5}
+ZKLOCK_TIMESPAN=${ZKLOCK_TIMESPAN-600}
+ZKLOCK_RETRIES=${ZKLOCK_RETRIES-5}
+
+CLEAN_EXIT="Clean Exit"
+UNEXPECTED_EXCEPTION="Unexpected exception"
+OOM_EXCEPTION="Out of memory exception"
+ZKLOCK_LOST="ZKLock lost"
+UNKNOWN_ERROR="Unknown error"
+
+ERRFILE=${ACCUMULO_LOG_DIR}/${process}_${LOGHOST}.err
+OUTFILE=${ACCUMULO_LOG_DIR}/${process}_${LOGHOST}.out
+DEBUGLOG=${ACCUMULO_LOG_DIR}/${process}_$(hostname).debug.log
+COMMAND="${ACCUMULO_BIN_DIR}/accumulo \"\$@\""
+
+logger -s "starting process $process at $(date)"
+stopRunning=""
+while [ -z "$stopRunning" ];
+do
+  eval $COMMAND 2> $ERRFILE
+  exit=$?
+  unset cause
+  if [ "$exit" -eq 0 ]; then
+    potentialStopRunning=$CLEAN_EXIT
+  elif [ "$exit" -eq 1 ]; then
+    potentialStopRunning=$UNEXPECTED_EXCEPTION
+  elif [ "$exit" -eq 130 ]; then
+    stopRunning="Control C detected, exiting"
+  elif [ "$exit" -eq 143 ]; then
+    stopRunning="Process terminated, exiting"
+  elif [ "$exit" -eq 137 ]; then
+    potentialStopRunning="Process killed, exiting"
+  fi
+  if [ -z "$stopRunning" ]; then
+    stopRunning=$potentialStopRunning;
+
+    if [ $exit -eq 1 ]; then
+      source="exit code"
+      cause=$UNEXPECTED_EXCEPTION
+    elif tail -n50 $OUTFILE | grep "java.lang.OutOfMemoryError:" > /dev/null; 
then
+      source="logs"
+      cause=$OOM_EXCEPTION
+    elif [ "$process" = "tserver" ]; then
+      if tail -n50 $DEBUGLOG | grep "ERROR: Lost tablet server lock (reason =" 
> /dev/null ; then
+        source="logs"
+        cause=$ZKLOCK_LOST
+      fi
+    elif [ "$process" = "master" ]; then
+      if tail -n50 $DEBUGLOG | grep "ERROR: Master lock in zookeeper lost 
(reason =" > /dev/null ; then
+        source="logs"
+        cause=$ZKLOCK_LOST
+      fi
+    elif [ "$process" = "gc" ]; then
+      if tail -n50 $DEBUGLOG | grep "FATAL: GC lock in zookeeper lost (reason 
=" > /dev/null ; then
+        source="logs"
+        cause=$ZKLOCK_LOST
+      fi
+    elif [ "$process" = "monitor" ]; then
+      if tail -n50 $DEBUGLOG | grep "ERROR:  Monitor lock in zookeeper lost 
(reason =" > /dev/null ; then
+        source="logs"
+        cause=$ZKLOCK_LOST
+      fi
+    elif [ $exit -ne 0 ]; then
+      source="exit code"
+      cause=$UNKNOWN_ERROR
+    fi
+    case $cause in
+      #Unknown exit code
+      "$UNKNOWN_ERROR")
+        #window doesn't matter when retries = 0
+        RETRIES=0
+        ;;
+
+      "$UNEXPECTED_EXCEPTION")
+        WINDOW=$UNEXPECTED_TIMESPAN
+        RETRIES=$UNEXPECTED_RETRIES
+        ;;
+
+      "$OOM_EXCEPTION") 
+        WINDOW=$OOM_TIMESPAN
+        RETRIES=$OOM_RETRIES
+        ;;
+
+      "$ZLOCK_LOST")
+        WINDOW=$ZKLOCK_TIMESPAN
+        RETRIES=$ZKLOCK_RETRIES
+        ;;
+    esac
+
+    if [ -n "$cause" ]; then
+      stopRunning=""
+      declare -i attempts
+      attempts="`jobs | grep "reason$cause" | wc -l`+1"
+      if [ "$RETRIES" -le $attempts ]; then
+        stopRunning="$process encountered $cause in $source with exit code 
$exit- quitting ($attempts/$RETRIES in $WINDOW seconds)"
+        # kill all sleeps now
+        for list in `jobs | cut -b 2-2`; do kill %$list; done
+      else
+        logger -s "$process encountered $cause in $source with exit code 
$exit- retrying ($attempts/$RETRIES in $WINDOW seconds)"
+        eval "(sleep $WINDOW ; echo "reason$cause" >> /dev/null) &" 
+      fi
+    fi 
+  fi
+done
+logger -s $stopRunning
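
The watcher's retry windows above are plain environment variables with
defaults, so they can be tuned before a service is started; the values below
are examples only:

    export UNEXPECTED_RETRIES=3 UNEXPECTED_TIMESPAN=1800
    export OOM_RETRIES=2 OOM_TIMESPAN=1800
    export ZKLOCK_RETRIES=3 ZKLOCK_TIMESPAN=300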

http://git-wip-us.apache.org/repos/asf/accumulo/blob/158cf16d/assemble/libexec/bootstrap-hdfs.sh
----------------------------------------------------------------------
diff --git a/assemble/libexec/bootstrap-hdfs.sh 
b/assemble/libexec/bootstrap-hdfs.sh
new file mode 100755
index 0000000..b678f27
--- /dev/null
+++ b/assemble/libexec/bootstrap-hdfs.sh
@@ -0,0 +1,90 @@
+#! /usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Start: Resolve Script Directory
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a 
symlink
+  libexec=$( cd -P "$( dirname "$SOURCE" )" && pwd )
+  SOURCE=$(readlink "$SOURCE")
+  [[ $SOURCE != /* ]] && SOURCE="$libexec/$SOURCE" # if $SOURCE was a relative 
symlink, we need to resolve it relative to the path where the symlink file was 
located
+done
+libexec=$( cd -P "$( dirname "$SOURCE" )" && pwd )
+# Stop: Resolve Script Directory
+
+source "$libexec"/load-env.sh
+
+#
+# Find the system context directory in HDFS
+#
+SYSTEM_CONTEXT_HDFS_DIR=$(grep -A1 "general.vfs.classpaths" 
"$ACCUMULO_CONF_DIR/accumulo-site.xml" | tail -1 | perl -pe 's/\s+<value>//; 
s/<\/value>//; s/,.+$//; s|[^/]+$||; print $ARGV[1]')
+
+if [ -z "$SYSTEM_CONTEXT_HDFS_DIR" ]
+then
+   echo "Your accumulo-site.xml file is not set up for the HDFS Classloader. 
Please add the following to your accumulo-site.xml file where ##CLASSPATH## is 
one of the following formats:"
+   echo "A single directory: hdfs://host:port/directory/"
+   echo "A single directory with a regex: hdfs://host:port/directory/.*.jar"
+   echo "Multiple directories: 
hdfs://host:port/directory/.*.jar,hdfs://host:port/directory2/"
+   echo ""
+   echo "<property>"
+   echo "   <name>general.vfs.classpaths</name>"
+   echo "   <value>##CLASSPATH##</value>"
+   echo "   <description>location of the jars for the default (system) 
context</description>"
+   echo "</property>"
+   exit 1
+fi
+
+#
+# Create the system context directory in HDFS if it does not exist
+#
+"$HADOOP_PREFIX/bin/hadoop" fs -ls "$SYSTEM_CONTEXT_HDFS_DIR"  > /dev/null
+if [[ $? != 0 ]]; then
+   "$HADOOP_PREFIX/bin/hadoop" fs -mkdir "$SYSTEM_CONTEXT_HDFS_DIR"  > 
/dev/null
+   if [[ $? != 0 ]]; then
+      echo "Unable to create classpath directory at $SYSTEM_CONTEXT_HDFS_DIR"
+      exit 1
+   fi
+fi
+
+#
+# Replicate to all tservers to avoid network contention on startup
+#
+TSERVERS=$ACCUMULO_CONF_DIR/tservers
+NUM_TSERVERS=$(egrep -v '(^#|^\s*$)' "$TSERVERS" | wc -l)
+
+#let each datanode service around 50 clients
+REP=$(( NUM_TSERVERS / 50 ))
+(( REP < 3 )) && REP=3
+
+#
+# Copy all jars in lib to the system context directory
+#
+"$HADOOP_PREFIX/bin/hadoop" fs -moveFromLocal "$ACCUMULO_LIB_DIR/*.jar 
"$SYSTEM_CONTEXT_HDFS_DIR"  > /dev/null
+"$HADOOP_PREFIX/bin/hadoop" fs -setrep -R $REP "$SYSTEM_CONTEXT_HDFS_DIR"  > 
/dev/null
+
+#
+# We need some of the jars in lib, copy them back out and remove them from the 
system context dir
+#
+"$HADOOP_PREFIX/bin/hadoop" fs -copyToLocal 
"$SYSTEM_CONTEXT_HDFS_DIR/commons-vfs2.jar" "$ACCUMULO_LIB_DIR/."  > /dev/null
+"$HADOOP_PREFIX/bin/hadoop" fs -rm "$SYSTEM_CONTEXT_HDFS_DIR/commons-vfs2.jar" 
 > /dev/null
+"$HADOOP_PREFIX/bin/hadoop" fs -copyToLocal 
"$SYSTEM_CONTEXT_HDFS_DIR/accumulo-start.jar" "$ACCUMULO_LIB_DIR/."  > /dev/null
+"$HADOOP_PREFIX/bin/hadoop" fs -rm 
"$SYSTEM_CONTEXT_HDFS_DIR/accumulo-start.jar"  > /dev/null
+"$HADOOP_PREFIX/bin/hadoop" fs -copyToLocal 
"$SYSTEM_CONTEXT_HDFS_DIR/slf4j*.jar" "$ACCUMULO_LIB_DIR/."  > /dev/null
+"$HADOOP_PREFIX/bin/hadoop" fs -rm "$SYSTEM_CONTEXT_HDFS_DIR/slf4j*.jar"  > 
/dev/null
+for f in $(grep -v '^#' "$ACCUMULO_CONF_DIR/tservers")
+do
+  rsync -ra --delete "$ACCUMULO_HOME" "$f:$(dirname "$ACCUMULO_HOME")"
+done
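
As the error message in the script spells out, bootstrap-hdfs.sh expects
general.vfs.classpaths to be set in accumulo-site.xml, along these lines (the
HDFS path is a placeholder):

    <property>
      <name>general.vfs.classpaths</name>
      <value>hdfs://host:port/accumulo/classpath/.*.jar</value>
      <description>location of the jars for the default (system) context</description>
    </property>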

http://git-wip-us.apache.org/repos/asf/accumulo/blob/158cf16d/assemble/libexec/check-tservers
----------------------------------------------------------------------
diff --git a/assemble/libexec/check-tservers b/assemble/libexec/check-tservers
new file mode 100755
index 0000000..7f9850e
--- /dev/null
+++ b/assemble/libexec/check-tservers
@@ -0,0 +1,199 @@
+#! /usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script will check the configuration and uniformity of all the nodes in 
a cluster.
+# Checks
+#   each node is reachable via ssh
+#   login identity is the same
+#   the physical memory is the same
+#   the mounts are the same on each machine
+#   a set of writable locations (typically different disks) are in fact 
writable
+# 
+# In order to check for writable partitions, you must configure the WRITABLE 
variable below.
+#
+
+import subprocess
+import time
+import select
+import os
+import sys
+import fcntl
+import signal
+if not sys.platform.startswith('linux'):
+   sys.stderr.write('This script only works on linux, sorry.\n')
+   sys.exit(1)
+
+TIMEOUT = 5
+WRITABLE = []
+#WRITABLE = ['/srv/hdfs1', '/srv/hdfs2', '/srv/hdfs3']
+
+def ssh(tserver, *args):
+    'execute a command on a remote tserver and return the Popen handle'
+    handle = subprocess.Popen( ('ssh', '-o', 'StrictHostKeyChecking=no', '-q', 
'-A', '-n', tserver) + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    handle.tserver = tserver
+    handle.finished = False
+    handle.out = ''
+    return handle
+
+def wait(handles, seconds):
+    'wait for lots of handles simultaneously, and kill anything that doesn\'t 
return in seconds time\n'
+    'Note that stdout will be stored on the handle as the "out" field and 
"finished" will be set to True'
+    handles = handles[:]
+    stop = time.time() + seconds
+    for h in handles:
+       fcntl.fcntl(h.stdout, fcntl.F_SETFL, os.O_NONBLOCK)
+    while handles and time.time() < stop:
+       wait = min(0, stop - time.time())
+       handleMap = dict( [(h.stdout, h) for h in handles] )
+       rd, wr, err = select.select(handleMap.keys(), [], [], wait)
+       for r in rd:
+           handle = handleMap[r]
+           while 1:
+               more = handle.stdout.read(1024)
+               if more == '':
+                   handles.remove(handle)
+                   handle.poll()
+                   handle.wait()
+                   handle.finished = True
+               handle.out += more
+               if len(more) < 1024:
+                   break
+    for handle in handles:
+       os.kill(handle.pid, signal.SIGKILL)
+       handle.poll()
+
+def runAll(tservers, *cmd):
+    'Run the given command on all the tservers, returns Popen handles'
+    handles = []
+    for tserver in tservers:
+        handles.append(ssh(tserver, *cmd))
+    wait(handles, TIMEOUT)
+    return handles
+
+def checkIdentity(tservers):
+    'Ensure the login identity is consistent across the tservers'
+    handles = runAll(tservers, 'id', '-u', '-n')
+    bad = set()
+    myIdentity = os.popen('id -u -n').read().strip()
+    for h in handles:
+        if not h.finished or h.returncode != 0:
+            print '#', 'cannot look at identity on', h.tserver
+            bad.add(h.tserver)
+        else:
+            identity = h.out.strip()
+            if identity != myIdentity:
+                print '#', h.tserver, 'inconsistent identity', identity
+                bad.add(h.tserver)
+    return bad
+
+def checkMemory(tservers):
+    'Run free on all tservers and look for weird results'
+    handles = runAll(tservers, 'free')
+    bad = set()
+    mem = {}
+    swap = {}
+    for h in handles:
+        if not h.finished or h.returncode != 0:
+            print '#', 'cannot look at memory on', h.tserver
+            bad.add(h.tserver)
+        else:
+            if h.out.find('Swap:') < 0:
+               print '#',h.tserver,'has no swap'
+               bad.add(h.tserver)
+               continue
+            lines = h.out.split('\n')
+            for line in lines:
+               if line.startswith('Mem:'):
+                  mem.setdefault(line.split()[1],set()).add(h.tserver)
+               if line.startswith('Swap:'):
+                  swap.setdefault(line.split()[1],set()).add(h.tserver)
+    # order memory sizes by most common
+    mems = sorted([(len(v), k, v) for k, v in mem.items()], reverse=True)
+    mostCommon = float(mems[0][1])
+    for _, size, tservers in mems[1:]:
+        fract = abs(mostCommon - float(size)) / mostCommon
+        if fract > 0.05:
+            print '#',', '.join(tservers), ': unusual memory size', size
+            bad.update(tservers)
+    swaps = sorted([(len(v), k, v) for k, v in swap.items()], reverse=True)
+    mostCommon = float(mems[0][1])
+    for _, size, tservers in swaps[1:]:
+        fract = abs(mostCommon - float(size) / mostCommon)
+        if fract > 0.05:
+            print '#',', '.join(tservers), ': unusual swap size', size
+            bad.update(tservers)
+    return bad
+
+def checkWritable(tservers):
+    'Touch all the directories that should be writable by this user return any 
nodes that fail'
+    if not WRITABLE:
+       print '# WRITABLE value not configured, not checking partitions'
+       return []
+    handles = runAll(tservers, 'touch', *WRITABLE)
+    bad = set()
+    for h in handles:
+        if not h.finished or h.returncode != 0:
+           bad.add(h.tserver)
+           print '#', h.tserver, 'some drives are not writable'
+    return bad
+
+def checkMounts(tservers):
+    'Check the file systems that are mounted and report any that are unusual'
+    handles = runAll(tservers, 'mount')
+    mounts = {}
+    finished = set()
+    bad = set()
+    for handle in handles:
+        if handle.finished and handle.returncode == 0:
+            for line in handle.out.split('\n'):
+                words = line.split()
+                if len(words) < 5: continue
+                if words[4] == 'nfs': continue
+                if words[0].find(':/') >= 0: continue
+                mount = words[2]
+                mounts.setdefault(mount, set()).add(handle.tserver)
+            finished.add(handle.tserver)
+        else:
+            bad.add(handle.tserver)
+            print '#', handle.tserver, 'did not finish'
+    for m in sorted(mounts.keys()):
+        diff = finished - mounts[m]
+        if diff:
+            bad.update(diff)
+            print '#', m, 'not mounted on', ', '.join(diff)
+    return bad
+
+def main(argv):
+    if len(argv) < 1:
+        sys.stderr.write('Usage: check_tservers tservers\n')
+        sys.exit(1)
+    sys.stdin.close()
+    tservers = set()
+    for tserver in open(argv[0]):
+        hashPos = tserver.find('#')
+        if hashPos >= 0:
+           tserver = tserver[:hashPos]
+        tserver = tserver.strip()
+        if not tserver: continue
+        tservers.add(tserver)
+    bad = set()
+    for test in checkIdentity, checkMemory, checkMounts, checkWritable:
+        bad.update(test(tservers - bad))
+    for tserver in sorted(tservers - bad):
+        print tserver
+
+main(sys.argv[1:])
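
A typical invocation of the checker above reads a tservers file and prints the
nodes that pass every check (assuming it is run from the Accumulo installation
directory where libexec/ lives):

    ./libexec/check-tservers conf/tservers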

http://git-wip-us.apache.org/repos/asf/accumulo/blob/158cf16d/assemble/libexec/cluster.sh
----------------------------------------------------------------------
diff --git a/assemble/libexec/cluster.sh b/assemble/libexec/cluster.sh
new file mode 100755
index 0000000..d3a2f5a
--- /dev/null
+++ b/assemble/libexec/cluster.sh
@@ -0,0 +1,330 @@
+#! /usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function print_usage {
+  cat <<EOF
+Usage: cluster.sh <command> (<argument> ...)
+
+Commands:
+  start-all [--notTservers]       Starts all services on cluster
+  start-tservers                  Starts all tservers on cluster
+  start-here                      Starts all services on this node
+  start-service <host> <service>  Starts <service> on <host>
+  stop-all                        Stops all services on cluster
+  stop-tservers                   Stops all tservers on cluster
+  stop-here                       Stops all services on this node
+
+EOF
+  exit 1
+}
+
+function invalid_args {
+  echo -e "Invalid arguments: $1\n"
+  print_usage
+  exit 1
+}
+
+function get_ip() {
+  net_cmd=/sbin/ifconfig
+  [[ ! -x $net_cmd ]] && net_cmd='/bin/netstat -ie'
+
+  ip_addr=$($net_cmd 2>/dev/null| grep "inet[^6]" | awk '{print $2}' | sed 
's/addr://' | grep -v 0.0.0.0 | grep -v 127.0.0.1 | head -n 1)
+  if [[ $? != 0 ]] ; then
+     ip_addr=$(python -c 'import socket as s; print 
s.gethostbyname(s.getfqdn())')
+  fi
+  echo "$ip_addr"
+}
+
+function start_service() {
+  host="$1"
+  service="$2"
+
+  if [[ $host == "localhost" || $host == $(hostname -f) || $host == $(hostname 
-s) || $host == $(get_ip) ]]; then
+    "$libexec/service.sh" start "$host" "$service"
+  else
+    $SSH "$host" "bash -c 'ACCUMULO_CONF_DIR=${ACCUMULO_CONF_DIR} 
$libexec/service.sh start \"$host\" \"$service\"'"
+  fi
+}
+
+function start_tservers() {
+  echo -n "Starting tablet servers ..."
+  count=1
+  for server in $(egrep -v '(^#|^\s*$)' "${ACCUMULO_CONF_DIR}/tservers"); do
+     echo -n "."
+     start_service "$server" tserver &
+     if (( ++count % 72 == 0 )) ;
+     then
+        echo
+        wait
+     fi
+  done
+  echo " done"
+}
+
+function start_all() {
+  unset DISPLAY
+
+  start_service "$monitor" monitor 
+
+  if [ "$1" != "--notTservers" ]; then
+     start_tservers
+  fi
+
+  ${accumulo_cmd} org.apache.accumulo.master.state.SetGoalState NORMAL
+  for master in $(egrep -v '(^#|^\s*$)' "$ACCUMULO_CONF_DIR/masters"); do
+     start_service "$master" master
+  done
+
+  for gc in $(egrep -v '(^#|^\s*$)' "$ACCUMULO_CONF_DIR/gc"); do
+     start_service "$gc" gc
+  done
+
+  for tracer in $(egrep -v '(^#|^\s*$)' "$ACCUMULO_CONF_DIR/tracers"); do
+     start_service "$tracer" tracer
+  done
+}
+
+function start_here() {
+
+  local_hosts="$(hostname -a 2> /dev/null) $(hostname) localhost 127.0.0.1 
$(get_ip)"
+  for host in $local_hosts; do
+     if grep -q "^${host}\$" "$ACCUMULO_CONF_DIR/tservers"; then
+        start_service "$host" tserver
+        break
+     fi
+  done
+
+  for host in $local_hosts; do
+     if grep -q "^${host}\$" "$ACCUMULO_CONF_DIR/masters"; then
+        ${accumulo_cmd} org.apache.accumulo.master.state.SetGoalState NORMAL
+        start_service "$host" master
+        break
+     fi
+  done
+
+  for host in $local_hosts; do
+     if grep -q "^${host}\$" "$ACCUMULO_CONF_DIR/gc"; then
+        start_service "$host" gc
+        break
+     fi
+  done
+
+  for host in $local_hosts; do
+     if [[ $host == "$monitor" ]]; then
+        start_service "$monitor" monitor 
+        break
+     fi
+  done
+
+  for host in $local_hosts; do
+     if grep -q "^${host}\$" "$ACCUMULO_CONF_DIR/tracers"; then
+        start_service "$host" tracer 
+        break
+     fi
+  done
+}
+
+function stop_service() {
+  host="$1"
+  service="$2"
+  signal="$3"
+
+  # run the stop locally when the host is this node; otherwise stop it over SSH
+  if [[ $host == localhost || $host = "$(hostname -s)" || $host = "$(hostname -f)" || $host = $(get_ip) ]] ; then
+    "$libexec/service.sh" stop "$host" "$service" "$signal"
+  else
+    $SSH "$host" "bash -c '$libexec/service.sh stop \"$host\" \"$service\" 
\"$signal\"'"
+  fi
+}
+
+function stop_tservers() {
+  tserver_hosts=$(egrep -v '(^#|^\s*$)' "${ACCUMULO_CONF_DIR}/tservers")
+
+  echo "Stopping unresponsive tablet servers (if any)..."
+  for server in ${tserver_hosts}; do
+     # ask each tablet server to exit (TERM), in parallel
+     stop_service "$server" tserver TERM & 
+  done
+
+  sleep 10
+
+  echo "Stopping unresponsive tablet servers hard (if any)..."
+  for server in ${tserver_hosts}; do
+     # force-kill any tablet servers that are still running
+     stop_service "$server" tserver KILL & 
+  done
+
+  echo "Cleaning tablet server entries from zookeeper"
+  ${accumulo_cmd} org.apache.accumulo.server.util.ZooZap -tservers
+}
+
+function stop_all() {
+  echo "Stopping accumulo services..."
+  if ! ${accumulo_cmd} admin stopAll
+  then
+     echo "Invalid password or unable to connect to the master"
+     echo "Initiating forced shutdown in 15 seconds (Ctrl-C to abort)"
+     sleep 10
+     echo "Initiating forced shutdown in  5 seconds (Ctrl-C to abort)"
+  else
+     echo "Accumulo shut down cleanly"
+     echo "Utilities and unresponsive servers will shut down in 5 seconds 
(Ctrl-C to abort)"
+  fi
+
+  sleep 5
+
+  # look for master, gc, monitor, and tracer processes not killed by 'admin stopAll'
+  for signal in TERM KILL ; do
+     for master in $(grep -v '^#' "$ACCUMULO_CONF_DIR/masters"); do
+        stop_service "$master" master $signal
+     done
+
+     for gc in $(grep -v '^#' "$ACCUMULO_CONF_DIR/gc"); do
+        stop_service "$gc" gc $signal
+     done
+
+     stop_service "$monitor" monitor $signal
+
+     for tracer in $(egrep -v '(^#|^\s*$)' "$ACCUMULO_CONF_DIR/tracers"); do
+        stop_service "$tracer" tracer $signal
+     done
+  done
+
+  # stop any tservers still running
+  stop_tservers
+
+  echo "Cleaning all server entries in ZooKeeper"
+  ${accumulo_cmd} org.apache.accumulo.server.util.ZooZap -master -tservers -tracers --site-file "$ACCUMULO_CONF_DIR/accumulo-site.xml"
+}
+
+function stop_here() {
+  # Determine hostname without errors to user
+  hosts_to_check=($(hostname -a 2> /dev/null | head -1) $(hostname -f))
+
+  if egrep -q localhost\|127.0.0.1 "$ACCUMULO_CONF_DIR/tservers"; then
+     ${accumulo_cmd} admin stop localhost
+  else
+     for host in "${hosts_to_check[@]}"; do
+        if grep -q "$host" "$ACCUMULO_CONF_DIR"/tservers; then
+           ${accumulo_cmd} admin stop "$host"
+        fi
+     done
+  fi
+
+  for host in "${hosts_to_check[@]}"; do
+     for signal in TERM KILL; do
+        for svc in tserver gc master monitor tracer; do
+           stop_service "$host" $svc $signal
+        done
+     done
+  done
+}
+
+function main() {
+  # Start: Resolve Script Directory
+  SOURCE="${BASH_SOURCE[0]}"
+  while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a 
symlink
+    libexec="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+    SOURCE="$(readlink "$SOURCE")"
+    [[ $SOURCE != /* ]] && SOURCE="$libexec/$SOURCE" # if $SOURCE was a 
relative symlink, we need to resolve it relative to the path where the symlink 
file was located
+  done
+  libexec="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+  # Stop: Resolve Script Directory
+
+  source "$libexec"/load-env.sh
+
+  if [[ -f $ACCUMULO_CONF_DIR/slaves ]]; then
+    echo "ERROR: A 'slaves' file was found in $ACCUMULO_CONF_DIR/"
+    echo "Accumulo now reads tablet server hosts from 'tservers' and requires 
that the 'slaves' file not be present to reduce confusion."
+    echo "Please rename the 'slaves' file to 'tservers' or remove it if both 
exist."
+    exit 1
+  fi
+
+  if [[ ! -f $ACCUMULO_CONF_DIR/tservers ]]; then
+    echo "ERROR: A 'tservers' file was not found at 
$ACCUMULO_CONF_DIR/tservers"
+    echo "Please make sure it exists and is configured with tablet server 
hosts."
+    exit 1
+  fi
+
+  unset master1
+  if [[ -f "$ACCUMULO_CONF_DIR/masters" ]]; then
+    master1=$(egrep -v '(^#|^\s*$)' "$ACCUMULO_CONF_DIR/masters" | head -1)
+  fi
+
+  if [[ -z "${monitor}" ]] ; then
+    monitor=$master1
+    if [[ -f "$ACCUMULO_CONF_DIR/monitor" ]]; then
+        monitor=$(egrep -v '(^#|^\s*$)' "$ACCUMULO_CONF_DIR/monitor" | head -1)
+    fi
+    if [[ -z "${monitor}" ]] ; then
+      echo "Could not infer a Monitor role. You need to either define 
\"${ACCUMULO_CONF_DIR}/monitor\"," 
+      echo "or make sure \"${ACCUMULO_CONF_DIR}/masters\" is non-empty."
+      exit 1
+    fi
+  fi
+  if [[ ! -f "$ACCUMULO_CONF_DIR/tracers" ]]; then
+    if [[ -z "${master1}" ]] ; then
+      echo "Could not find a master node to use as a default for the tracer 
role."
+      echo "Either set up \"${ACCUMULO_CONF_DIR}/tracers\" or make sure 
\"${ACCUMULO_CONF_DIR}/masters\" is non-empty."
+      exit 1
+    else
+      echo "$master1" > "$ACCUMULO_CONF_DIR/tracers"
+    fi
+  fi
+
+  if [[ ! -f "$ACCUMULO_CONF_DIR/gc" ]]; then
+    if [[ -z "${master1}" ]] ; then
+      echo "Could not infer a GC role. You need to either set up 
\"${ACCUMULO_CONF_DIR}/gc\" or make sure \"${ACCUMULO_CONF_DIR}/masters\" is 
non-empty."
+      exit 1
+    else
+      echo "$master1" > "$ACCUMULO_CONF_DIR/gc"
+    fi
+  fi
+  accumulo_cmd="$ACCUMULO_BIN_DIR/accumulo"
+
+  SSH='ssh -qnf -o ConnectTimeout=2'
+
+  if [[ -z $1 ]]; then
+    invalid_args "<command> cannot be empty"
+  fi
+
+  case "$1" in
+    start-all)
+      start_all "${*:2}"
+      ;;
+    start-tservers)
+      start_tservers
+      ;;
+    start-here)
+      start_here
+      ;;
+    start-service)
+      start_service "$2" "$3"
+      ;;
+    stop-all)
+      stop_all
+      ;;
+    stop-tservers)
+      stop_tservers
+      ;;
+    stop-here)
+      stop_here
+      ;;
+    *)
+      invalid_args "'$1' is an invalid <command>"
+      ;;
+  esac
+}
+
+main "$@"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/158cf16d/assemble/libexec/config.sh
----------------------------------------------------------------------
diff --git a/assemble/libexec/config.sh b/assemble/libexec/config.sh
new file mode 100755
index 0000000..6a7ffaf
--- /dev/null
+++ b/assemble/libexec/config.sh
@@ -0,0 +1,408 @@
+#! /usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function usage {
+  cat <<EOF
+Usage: config.sh [-options]
+where options include (long options not available on all platforms):
+    -d, --dir        Alternate directory to setup config files
+    -s, --size       Supported sizes: '1GB' '2GB' '3GB' '512MB'
+    -n, --native     Configure to use native libraries
+    -j, --jvm        Configure to use the jvm
+    -o, --overwrite  Overwrite the default config directory
+    -v, --version    Specify the Apache Hadoop version. Supported versions: '2', 'HDP2', 'HDP2.2', 'IOP4.1'
+    -k, --kerberos   Configure for use with Kerberos
+    -h, --help       Print this help message
+EOF
+}
+
+# Start: Resolve Script Directory
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a 
symlink
+  libexec="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+  SOURCE="$(readlink "$SOURCE")"
+  [[ $SOURCE != /* ]] && SOURCE="$libexec/$SOURCE" # if $SOURCE was a relative 
symlink, we need to resolve it relative to the path where the symlink file was 
located
+done
+libexec="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+basedir=$( cd -P "${libexec}"/.. && pwd )
+# Stop: Resolve Script Directory
+
+TEMPLATE_CONF_DIR="${libexec}/templates"
+CONF_DIR="${ACCUMULO_CONF_DIR:-$basedir/conf}"
+ACCUMULO_SITE=accumulo-site.xml
+ACCUMULO_ENV=accumulo-env.sh
+
+SIZE=
+TYPE=
+HADOOP_VERSION=
+OVERWRITE="0"
+BASE_DIR=
+KERBEROS=
+
+#Execute getopt
+if [[ $(uname -s) == "Linux" ]]; then
+  args=$(getopt -o "b:d:s:njokv:h" -l "basedir:,dir:,size:,native,jvm,overwrite,kerberos,version:,help" -q -- "$@")
+else # Darwin, BSD
+  args=$(getopt b:d:s:njokv:h $*)
+fi
+
+#Bad arguments
+if [[ $? != 0 ]]; then
+  usage 1>&2
+  exit 1
+fi
+eval set -- $args
+
+for i
+do
+  case "$i" in
+    -b|--basedir) #Hidden option used to set general.maven.project.basedir for developers
+      BASE_DIR=$2; shift
+      shift;;
+    -d|--dir)
+      CONF_DIR=$2; shift
+      shift;;
+    -s|--size)
+      SIZE=$2; shift
+      shift;;
+    -n|--native)
+      TYPE=native
+      shift;;
+    -j|--jvm)
+      TYPE=jvm
+      shift;;
+    -o|--overwrite)
+      OVERWRITE=1
+      shift;;
+    -v|--version)
+      HADOOP_VERSION=$2; shift
+      shift;;
+    -k|--kerberos)
+      KERBEROS="true"
+      shift;;
+    -h|--help)
+      usage
+      exit 0
+      shift;;
+    --)
+    shift
+    break;;
+  esac
+done
+
+while [[ "${OVERWRITE}" = "0" ]]; do
+  if [[ -e "${CONF_DIR}/${ACCUMULO_ENV}" || -e "${CONF_DIR}/${ACCUMULO_SITE}" 
]]; then
+    echo "Warning your current config files in ${CONF_DIR} will be 
overwritten!"
+    echo
+    echo "How would you like to proceed?:"
+    select CHOICE in 'Continue with overwrite' 'Specify new conf dir'; do
+      if [[ "${CHOICE}" = 'Specify new conf dir' ]]; then
+        echo -n "Please specifiy new conf directory: "
+        read CONF_DIR
+      elif [[ "${CHOICE}" = 'Continue with overwrite' ]]; then
+        OVERWRITE=1
+      fi
+      break
+    done
+  else
+    OVERWRITE=1
+  fi
+done
+echo "Copying configuration files to: ${CONF_DIR}"
+
+#Native 1GB
+native_1GB_tServer="-Xmx128m -Xms128m"
+_1GB_master="-Xmx128m -Xms128m"
+_1GB_monitor="-Xmx64m -Xms64m"
+_1GB_gc="-Xmx64m -Xms64m"
+_1GB_other="-Xmx128m -Xms64m"
+_1GB_shell="${_1GB_other}"
+
+_1GB_memoryMapMax="256M"
+native_1GB_nativeEnabled="true"
+_1GB_cacheDataSize="15M"
+_1GB_cacheIndexSize="40M"
+_1GB_sortBufferSize="50M"
+_1GB_waLogMaxSize="256M"
+
+#Native 2GB
+native_2GB_tServer="-Xmx256m -Xms256m"
+_2GB_master="-Xmx256m -Xms256m"
+_2GB_monitor="-Xmx128m -Xms64m"
+_2GB_gc="-Xmx128m -Xms128m"
+_2GB_other="-Xmx256m -Xms64m"
+_2GB_shell="${_2GB_other}"
+
+_2GB_memoryMapMax="512M"
+native_2GB_nativeEnabled="true"
+_2GB_cacheDataSize="30M"
+_2GB_cacheIndexSize="80M"
+_2GB_sortBufferSize="50M"
+_2GB_waLogMaxSize="512M"
+
+#Native 3GB
+native_3GB_tServer="-Xmx1g -Xms1g -XX:NewSize=500m -XX:MaxNewSize=500m"
+_3GB_master="-Xmx1g -Xms1g"
+_3GB_monitor="-Xmx1g -Xms256m"
+_3GB_gc="-Xmx256m -Xms256m"
+_3GB_other="-Xmx1g -Xms256m"
+_3GB_shell="${_3GB_other}"
+
+_3GB_memoryMapMax="1G"
+native_3GB_nativeEnabled="true"
+_3GB_cacheDataSize="128M"
+_3GB_cacheIndexSize="128M"
+_3GB_sortBufferSize="200M"
+_3GB_waLogMaxSize="1G"
+
+#Native 512MB
+native_512MB_tServer="-Xmx48m -Xms48m"
+_512MB_master="-Xmx128m -Xms128m"
+_512MB_monitor="-Xmx64m -Xms64m"
+_512MB_gc="-Xmx64m -Xms64m"
+_512MB_other="-Xmx128m -Xms64m"
+_512MB_shell="${_512MB_other}"
+
+_512MB_memoryMapMax="80M"
+native_512MB_nativeEnabled="true"
+_512MB_cacheDataSize="7M"
+_512MB_cacheIndexSize="20M"
+_512MB_sortBufferSize="50M"
+_512MB_waLogMaxSize="100M"
+
+#JVM 1GB
+jvm_1GB_tServer="-Xmx384m -Xms384m"
+
+jvm_1GB_nativeEnabled="false"
+
+#JVM 2GB
+jvm_2GB_tServer="-Xmx768m -Xms768m"
+
+jvm_2GB_nativeEnabled="false"
+
+#JVM 3GB
+jvm_3GB_tServer="-Xmx2g -Xms2g -XX:NewSize=1G -XX:MaxNewSize=1G"
+
+jvm_3GB_nativeEnabled="false"
+
+#JVM 512MB
+jvm_512MB_tServer="-Xmx128m -Xms128m"
+
+jvm_512MB_nativeEnabled="false"
+
+
+if [[ -z "${SIZE}" ]]; then
+  echo "Choose the heap configuration:"
+  select DIRNAME in 1GB 2GB 3GB 512MB; do
+    echo "Using '${DIRNAME}' configuration"
+    SIZE=${DIRNAME}
+    break
+  done
+elif [[ "${SIZE}" != "1GB" && "${SIZE}" != "2GB"  && "${SIZE}" != "3GB" && 
"${SIZE}" != "512MB" ]]; then
+  echo "Invalid memory size"
+  echo "Supported sizes: '1GB' '2GB' '3GB' '512MB'"
+  exit 1
+fi
+
+if [[ -z "${TYPE}" ]]; then
+  echo
+  echo "Choose the Accumulo memory-map type:"
+  select TYPENAME in Java Native; do
+    if [[ "${TYPENAME}" == "Native" ]]; then
+      TYPE="native"
+      echo "Don't forget to build the native libraries using the 
bin/build_native_library.sh script"
+    elif [[ "${TYPENAME}" == "Java" ]]; then
+      TYPE="jvm"
+    fi
+    echo "Using '${TYPE}' configuration"
+    echo
+    break
+  done
+fi
+
+if [[ -z "${HADOOP_VERSION}" ]]; then
+  echo
+  echo "Choose the Apache Hadoop version:"
+  select HADOOP in 'Hadoop 2' 'HDP 2.0/2.1' 'HDP 2.2' 'IOP 4.1'; do
+    if [ "${HADOOP}" == "Hadoop 2" ]; then
+      HADOOP_VERSION="2"
+    elif [ "${HADOOP}" == "HDP 2.0/2.1" ]; then
+      HADOOP_VERSION="HDP2"
+    elif [ "${HADOOP}" == "HDP 2.2" ]; then
+      HADOOP_VERSION="HDP2.2"
+    elif [ "${HADOOP}" == "IOP 4.1" ]; then
+      HADOOP_VERSION="IOP4.1"
+    fi
+    echo "Using Hadoop version '${HADOOP_VERSION}' configuration"
+    echo
+    break
+  done
+elif [[ "${HADOOP_VERSION}" != "2" && "${HADOOP_VERSION}" != "HDP2" && 
"${HADOOP_VERSION}" != "HDP2.2" ]]; then
+  echo "Invalid Hadoop version"
+  echo "Supported Hadoop versions: '2', 'HDP2', 'HDP2.2'"
+  exit 1
+fi
+
+TRACE_USER="root"
+
+if [[ ! -z "${KERBEROS}" ]]; then
+  echo
+  read -p "Enter server's Kerberos principal: " PRINCIPAL
+  read -p "Enter server's Kerberos keytab: " KEYTAB
+  TRACE_USER="${PRINCIPAL}"
+fi
+
+for var in SIZE TYPE HADOOP_VERSION; do
+  if [[ -z ${!var} ]]; then
+    echo "Invalid $var configuration"
+    exit 1
+  fi
+done
+
+TSERVER="${TYPE}_${SIZE}_tServer"
+MASTER="_${SIZE}_master"
+MONITOR="_${SIZE}_monitor"
+GC="_${SIZE}_gc"
+SHELL="_${SIZE}_shell"
+OTHER="_${SIZE}_other"
+
+MEMORY_MAP_MAX="_${SIZE}_memoryMapMax"
+NATIVE="${TYPE}_${SIZE}_nativeEnabled"
+CACHE_DATA_SIZE="_${SIZE}_cacheDataSize"
+CACHE_INDEX_SIZE="_${SIZE}_cacheIndexSize"
+SORT_BUFFER_SIZE="_${SIZE}_sortBufferSize"
+WAL_MAX_SIZE="_${SIZE}_waLogMaxSize"
+
+MAVEN_PROJ_BASEDIR=""
+
+if [[ ! -z "${BASE_DIR}" ]]; then
+  MAVEN_PROJ_BASEDIR="\n  <property>\n    <name>general.maven.project.basedir</name>\n    <value>${BASE_DIR}</value>\n  </property>\n"
+fi
+
+mkdir -p "${CONF_DIR}" && cp ${TEMPLATE_CONF_DIR}/* ${CONF_DIR}/
+
+if [[ -f "${CONF_DIR}/examples/client.conf" ]]; then
+  cp ${CONF_DIR}/examples/client.conf ${CONF_DIR}/
+fi
+
+#Configure accumulo-env.sh
+sed -e "s/\${tServerHigh_tServerLow}/${!TSERVER}/" \
+    -e "s/\${masterHigh_masterLow}/${!MASTER}/" \
+    -e "s/\${monitorHigh_monitorLow}/${!MONITOR}/" \
+    -e "s/\${gcHigh_gcLow}/${!GC}/" \
+    -e "s/\${shellHigh_shellLow}/${!SHELL}/" \
+    -e "s/\${otherHigh_otherLow}/${!OTHER}/" \
+    ${TEMPLATE_CONF_DIR}/$ACCUMULO_ENV > ${CONF_DIR}/$ACCUMULO_ENV
+
+#Configure accumulo-site.xml
+sed -e "s/\${memMapMax}/${!MEMORY_MAP_MAX}/" \
+    -e "s/\${nativeEnabled}/${!NATIVE}/" \
+    -e "s/\${cacheDataSize}/${!CACHE_DATA_SIZE}/" \
+    -e "s/\${cacheIndexSize}/${!CACHE_INDEX_SIZE}/" \
+    -e "s/\${sortBufferSize}/${!SORT_BUFFER_SIZE}/" \
+    -e "s/\${waLogMaxSize}/${!WAL_MAX_SIZE}/" \
+    -e "s=\${traceUser}=${TRACE_USER}=" \
+    -e "s=\${mvnProjBaseDir}=${MAVEN_PROJ_BASEDIR}=" 
${TEMPLATE_CONF_DIR}/$ACCUMULO_SITE > ${CONF_DIR}/$ACCUMULO_SITE
+
+# If we're not using kerberos, filter out the krb properties
+if [[ -z "${KERBEROS}" ]]; then
+  sed -e 's/<!-- Kerberos requirements -->/<!-- Kerberos requirements --><!--/' \
+      -e 's/<!-- End Kerberos requirements -->/--><!-- End Kerberos requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+else
+  # Make the substitutions
+  sed -e "s!\${keytab}!${KEYTAB}!" \
+      -e "s!\${principal}!${PRINCIPAL}!" \
+      ${CONF_DIR}/${ACCUMULO_SITE} > temp
+  mv temp ${CONF_DIR}/${ACCUMULO_SITE}
+fi
+
+# Configure hadoop version
+if [[ "${HADOOP_VERSION}" == "2" ]]; then
+  sed -e 's/<!-- HDP 2.0 requirements -->/<!-- HDP 2.0 requirements --><!--/' \
+      -e 's/<!-- End HDP 2.0 requirements -->/--><!-- End HDP 2.0 requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+  sed -e 's/<!-- HDP 2.2 requirements -->/<!-- HDP 2.2 requirements --><!--/' \
+      -e 's/<!-- End HDP 2.2 requirements -->/--><!-- End HDP 2.2 requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+  sed -e 's/<!-- IOP 4.1 requirements -->/<!-- IOP 4.1 requirements --><!--/' \
+      -e 's/<!-- End IOP 4.1 requirements -->/--><!-- End IOP 4.1 requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+elif [[ "${HADOOP_VERSION}" == "HDP2" ]]; then
+  sed -e 's/<!-- Hadoop 2 requirements -->/<!-- Hadoop 2 requirements --><!--/' \
+      -e 's/<!-- End Hadoop 2 requirements -->/--><!-- End Hadoop 2 requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+  sed -e 's/<!-- HDP 2.2 requirements -->/<!-- HDP 2.2 requirements --><!--/' \
+      -e 's/<!-- End HDP 2.2 requirements -->/--><!-- End HDP 2.2 requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+  sed -e 's/<!-- IOP 4.1 requirements -->/<!-- IOP 4.1 requirements --><!--/' \
+      -e 's/<!-- End IOP 4.1 requirements -->/--><!-- End IOP 4.1 requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+elif [[ "${HADOOP_VERSION}" == "HDP2.2" ]]; then
+  sed -e 's/<!-- Hadoop 2 requirements -->/<!-- Hadoop 2 requirements --><!--/' \
+      -e 's/<!-- End Hadoop 2 requirements -->/--><!-- End Hadoop 2 requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+  sed -e 's/<!-- HDP 2.0 requirements -->/<!-- HDP 2.0 requirements --><!--/' \
+      -e 's/<!-- End HDP 2.0 requirements -->/--><!-- End HDP 2.0 requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+  sed -e 's/<!-- IOP 4.1 requirements -->/<!-- IOP 4.1 requirements --><!--/' \
+      -e 's/<!-- End IOP 4.1 requirements -->/--><!-- End IOP 4.1 requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+elif [[ "${HADOOP_VERSION}" == "IOP4.1" ]]; then
+  sed -e 's/<!-- Hadoop 2 requirements -->/<!-- Hadoop 2 requirements --><!--/' \
+      -e 's/<!-- End Hadoop 2 requirements -->/--><!-- End Hadoop 2 requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+  sed -e 's/<!-- HDP 2.0 requirements -->/<!-- HDP 2.0 requirements --><!--/' \
+      -e 's/<!-- End HDP 2.0 requirements -->/--><!-- End HDP 2.0 requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+  sed -e 's/<!-- HDP 2.2 requirements -->/<!-- HDP 2.2 requirements --><!--/' \
+      -e 's/<!-- End HDP 2.2 requirements -->/--><!-- End HDP 2.2 requirements -->/' \
+      "${CONF_DIR}/$ACCUMULO_SITE" > temp
+  mv temp "${CONF_DIR}/$ACCUMULO_SITE"
+fi
+
+#Additional setup steps for native configuration.
+if [[ ${TYPE} == native ]]; then
+  if [[ $(uname) == Linux ]]; then
+    if [[ -z $HADOOP_PREFIX ]]; then
+      echo "WARNING: HADOOP_PREFIX not set, cannot automatically configure 
LD_LIBRARY_PATH to include Hadoop native libraries"
+    else
+      NATIVE_LIB=$(readlink -ef $(dirname $(for x in $(find $HADOOP_PREFIX -name libhadoop.so); do ld $x 2>/dev/null && echo $x && break; done) 2>>/dev/null) 2>>/dev/null)
+      if [[ -z $NATIVE_LIB ]]; then
+        echo -e "WARNING: The Hadoop native libraries could not be found for 
your sytem in: $HADOOP_PREFIX"
+      else
+        sed "/# Should the monitor/ i export 
LD_LIBRARY_PATH=${NATIVE_LIB}:\${LD_LIBRARY_PATH}" ${CONF_DIR}/$ACCUMULO_ENV > 
temp
+        mv temp "${CONF_DIR}/$ACCUMULO_ENV"
+        echo -e "Added ${NATIVE_LIB} to the LD_LIBRARY_PATH"
+      fi
+    fi
+  fi
+  echo -e "Please remember to compile the Accumulo native libraries using the 
bin/build_native_library.sh script and to set the LD_LIBRARY_PATH variable in 
the ${CONF_DIR}/accumulo-env.sh script if needed."
+fi
+echo "Setup complete"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/158cf16d/assemble/libexec/gen-monitor-cert.sh
----------------------------------------------------------------------
diff --git a/assemble/libexec/gen-monitor-cert.sh b/assemble/libexec/gen-monitor-cert.sh
new file mode 100755
index 0000000..46263ce
--- /dev/null
+++ b/assemble/libexec/gen-monitor-cert.sh
@@ -0,0 +1,84 @@
+#! /usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Start: Resolve Script Directory
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a 
symlink
+   libexec=$( cd -P "$( dirname "$SOURCE" )" && pwd )
+   SOURCE=$(readlink "$SOURCE")
+   [[ $SOURCE != /* ]] && SOURCE="$libexec/$SOURCE" # if $SOURCE was a 
relative symlink, we need to resolve it relative to the path where the symlink 
file was located
+done
+libexec=$( cd -P "$( dirname "$SOURCE" )" && pwd )
+# Stop: Resolve Script Directory
+
+source "$libexec"/load-env.sh
+
+ALIAS="default"
+KEYPASS=$(LC_CTYPE=C tr -dc '#-~' < /dev/urandom | tr -d '<>&' | head -c 20)
+STOREPASS=$(LC_CTYPE=C tr -dc '#-~' < /dev/urandom | tr -d '<>&' | head -c 20)
+KEYSTOREPATH="$ACCUMULO_CONF_DIR/keystore.jks"
+TRUSTSTOREPATH="$ACCUMULO_CONF_DIR/conf/cacerts.jks"
+CERTPATH="$ACCUMULO_CONF_DIR/server.cer"
+
+if [[ -e "$KEYSTOREPATH" ]]; then
+   rm -i "$KEYSTOREPATH"
+   if [[ -e "$KEYSTOREPATH" ]]; then
+      echo "KeyStore already exists, exiting"
+      exit 1
+   fi
+fi
+
+if [[ -e "$TRUSTSTOREPATH" ]]; then
+   rm -i "$TRUSTSTOREPATH"
+   if [[ -e "$TRUSTSTOREPATH" ]]; then
+      echo "TrustStore already exists, exiting"
+      exit 2
+   fi
+fi
+
+if [[ -e "$CERTPATH" ]]; then
+   rm -i "$CERTPATH"
+   if [[ -e "$CERTPATH" ]]; then
+      echo "Certificate already exists, exiting"
+      exit 3
+   fi
+fi
+
+"${JAVA_HOME}/bin/keytool" -genkey -alias "$ALIAS" -keyalg RSA -keypass 
"$KEYPASS" -storepass "$KEYPASS" -keystore "$KEYSTOREPATH"
+"${JAVA_HOME}/bin/keytool" -export -alias "$ALIAS" -storepass "$KEYPASS" -file 
"$CERTPATH" -keystore "$KEYSTOREPATH"
+"${JAVA_HOME}/bin/keytool" -import -v -trustcacerts -alias "$ALIAS" -file 
"$CERTPATH" -keystore "$TRUSTSTOREPATH" -storepass "$STOREPASS" <<< "yes"
+
+echo
+echo "keystore and truststore generated.  now add the following to 
accumulo-site.xml:"
+echo
+echo "    <property>"
+echo "      <name>monitor.ssl.keyStore</name>"
+echo "      <value>$KEYSTOREPATH</value>"
+echo "    </property>"
+echo "    <property>"
+echo "      <name>monitor.ssl.keyStorePassword</name>"
+echo "      <value>$KEYPASS</value>"
+echo "    </property>"
+echo "    <property>"
+echo "      <name>monitor.ssl.trustStore</name>"
+echo "      <value>$TRUSTSTOREPATH</value>"
+echo "    </property>"
+echo "    <property>"
+echo "      <name>monitor.ssl.trustStorePassword</name>"
+echo "      <value>$STOREPASS</value>"
+echo "    </property>"
+echo
