http://git-wip-us.apache.org/repos/asf/accumulo/blob/ab0d6fc3/docs/src/main/resources/examples/README.filedata
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/README.filedata b/docs/src/main/resources/examples/README.filedata
index cfb41ba..a94d493 100644
--- a/docs/src/main/resources/examples/README.filedata
+++ b/docs/src/main/resources/examples/README.filedata
@@ -40,7 +40,7 @@ Open the accumulo shell and look at the data. The row is the MD5 hash of the fil
 
 Run the CharacterHistogram MapReduce to add some information about the file.
 
-    $ ./lib/scripts/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.filedata.CharacterHistogram -i instance -z zookeepers -u username -p password -t dataTable --auths exampleVis --vis exampleVis
+    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.filedata.CharacterHistogram -i instance -z zookeepers -u username -p password -t dataTable --auths exampleVis --vis exampleVis
 
 Scan again to see the histogram stored in the 'info' column family.
 
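To spot-check the result, the histogram can be read back from the shell once the user holds the exampleVis authorization (a sketch; the exact prompt and output depend on the setup above):

    $ ./bin/accumulo shell -u username -p password
    username@instance> setauths -u username -s exampleVis
    username@instance> scan -t dataTable -np -c info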

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ab0d6fc3/docs/src/main/resources/examples/README.mapred
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/README.mapred b/docs/src/main/resources/examples/README.mapred
index eccf598..ddd0dbf 100644
--- a/docs/src/main/resources/examples/README.mapred
+++ b/docs/src/main/resources/examples/README.mapred
@@ -50,7 +50,7 @@ for the column family count.
 
 After creating the table, run the word count map reduce job.
 
-    $ ./lib/scripts/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers  --input /user/username/wc -t wordCount -u username -p password
+    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers  --input /user/username/wc -t wordCount -u username -p password
 
    11/02/07 18:20:11 INFO input.FileInputFormat: Total input paths to process : 1
     11/02/07 18:20:12 INFO mapred.JobClient: Running job: job_201102071740_0003
@@ -134,14 +134,14 @@ Because the basic WordCount example uses Opts to parse its arguments
 the basic WordCount example by calling the same command as explained above
 except replacing the password with the token file (rather than -p, use -tf).
 
-  $ ./lib/scripts/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers  --input /user/username/wc -t wordCount -u username -tf tokenfile
+  $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers  --input /user/username/wc -t wordCount -u username -tf tokenfile
 
 In the above examples, username was 'root' and tokenfile was 'root.pw'
 
 However, if you don't want to use the Opts class to parse arguments,
 the TokenFileWordCount is an example of using the token file manually.
 
-  $ ./lib/scripts/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TokenFileWordCount instance zookeepers username tokenfile /user/username/wc wordCount
+  $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TokenFileWordCount instance zookeepers username tokenfile /user/username/wc wordCount
 
 The results should be the same as the WordCount example except that the
 authentication token was not stored in the configuration. It was instead

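A token file like 'root.pw' above is typically produced with Accumulo's create-token utility, which prompts for the credentials and serializes the authentication token to a file (a sketch; the prompts vary by version):

    $ ./bin/accumulo create-token
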
http://git-wip-us.apache.org/repos/asf/accumulo/blob/ab0d6fc3/docs/src/main/resources/examples/README.regex
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/README.regex b/docs/src/main/resources/examples/README.regex
index 1fe9af9..05ea4de 100644
--- a/docs/src/main/resources/examples/README.regex
+++ b/docs/src/main/resources/examples/README.regex
@@ -41,7 +41,7 @@ in parallel and will store the results in files in hdfs.
 
The following will search for any rows in the input table that start with "dog":
 
-    $ ./lib/scripts/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.RegexExample -u user -p passwd -i instance -t input --rowRegex 'dog.*' --output /tmp/output
+    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.RegexExample -u user -p passwd -i instance -t input --rowRegex 'dog.*' --output /tmp/output
 
     $ hadoop fs -ls /tmp/output
     Found 3 items

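Since RegexExample writes its matches as text files in HDFS, the results can be inspected directly (the part-file name below is illustrative; actual names depend on the job):

    $ hadoop fs -cat /tmp/output/part-m-00000
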
http://git-wip-us.apache.org/repos/asf/accumulo/blob/ab0d6fc3/docs/src/main/resources/examples/README.rowhash
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/README.rowhash b/docs/src/main/resources/examples/README.rowhash
index 4c84ca5..897a92c 100644
--- a/docs/src/main/resources/examples/README.rowhash
+++ b/docs/src/main/resources/examples/README.rowhash
@@ -38,7 +38,7 @@ put a trivial amount of data into accumulo using the accumulo shell:
The RowHash class will insert a hash for each row in the database if it contains a
specified column. Here's how you run the map/reduce job:
 
-    $ ./lib/scripts/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.RowHash -u user -p passwd -i instance -t input --column cf:cq
+    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.RowHash -u user -p passwd -i instance -t input --column cf:cq
 
 Now we can scan the table and see the hashes:
 

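That follow-up scan can also be run non-interactively with the shell's -e option (a sketch; the hash columns that appear depend on the --column argument used above):

    $ ./bin/accumulo shell -u user -p passwd -e "scan -t input -np"
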
http://git-wip-us.apache.org/repos/asf/accumulo/blob/ab0d6fc3/docs/src/main/resources/examples/README.tabletofile
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/README.tabletofile b/docs/src/main/resources/examples/README.tabletofile
index f3d49e8..c07c60b 100644
--- a/docs/src/main/resources/examples/README.tabletofile
+++ b/docs/src/main/resources/examples/README.tabletofile
@@ -40,7 +40,7 @@ write the key/value pairs to a file in HDFS.
 
 The following will extract the rows containing the column "cf:cq":
 
-    $ ./lib/scripts/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TableToFile -u user -p passwd -i instance -t input --columns cf:cq --output /tmp/output
+    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TableToFile -u user -p passwd -i instance -t input --columns cf:cq --output /tmp/output
 
     $ hadoop fs -ls /tmp/output
    -rw-r--r--   1 username supergroup          0 2013-01-10 14:44 /tmp/output/_SUCCESS

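_SUCCESS is just an empty marker written when the job completes; the extracted key/value pairs are in the accompanying part files, which TableToFile writes as plain text (part-file name illustrative):

    $ hadoop fs -cat /tmp/output/part-m-00000
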
http://git-wip-us.apache.org/repos/asf/accumulo/blob/ab0d6fc3/docs/src/main/resources/examples/README.terasort
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/README.terasort b/docs/src/main/resources/examples/README.terasort
index 4db6ce4..5401b91 100644
--- a/docs/src/main/resources/examples/README.terasort
+++ b/docs/src/main/resources/examples/README.terasort
@@ -22,7 +22,7 @@ hadoop terasort benchmark.
 
 To run this example, pass arguments describing the amount of data:
 
-    $ ./lib/scripts/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest \
+    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest \
     -i instance -z zookeepers -u user -p password \
     --count 10 \
     --minKeySize 10 \

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ab0d6fc3/minicluster/src/test/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControlTest.java
----------------------------------------------------------------------
diff --git a/minicluster/src/test/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControlTest.java b/minicluster/src/test/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControlTest.java
index 7badef9..1d8a4dc 100644
--- a/minicluster/src/test/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControlTest.java
+++ b/minicluster/src/test/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControlTest.java
@@ -46,7 +46,7 @@ public class StandaloneClusterControlTest {
 
   @Test
   public void mapreduceLaunchesLocally() throws Exception {
-    final String toolPath = "/usr/lib/accumulo/lib/scripts/tool.sh";
+    final String toolPath = "/usr/lib/accumulo/contrib/tool.sh";
     final String jar = "/home/user/my_project.jar";
     final Class<?> clz = Object.class;
     final String myClass = clz.getName();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ab0d6fc3/proxy/README
----------------------------------------------------------------------
diff --git a/proxy/README b/proxy/README
index a25acec..7880de7 100644
--- a/proxy/README
+++ b/proxy/README
@@ -46,7 +46,7 @@ Accumulo 1.5 instance, or when run standalone in the Mock configuration.
 
 Run the following command.
 
- ./bin/accumulo proxy -p ./opt/proxy/proxy.properties
+ ./bin/accumulo proxy -p ./proxy/proxy.properties
 
 5. Clients
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ab0d6fc3/test/system/continuous/run-moru.sh
----------------------------------------------------------------------
diff --git a/test/system/continuous/run-moru.sh b/test/system/continuous/run-moru.sh
index b3f3a75..3c73ddb 100755
--- a/test/system/continuous/run-moru.sh
+++ b/test/system/continuous/run-moru.sh
@@ -33,5 +33,5 @@ CONTINUOUS_CONF_DIR=${CONTINUOUS_CONF_DIR:-${bin}}
 
 SERVER_LIBJAR="$ACCUMULO_HOME/lib/accumulo-test.jar"
 
-"$ACCUMULO_HOME/lib/scripts/tool.sh" "$SERVER_LIBJAR" 
org.apache.accumulo.test.continuous.ContinuousMoru -libjars "$SERVER_LIBJAR" -i 
"$INSTANCE_NAME" -z "$ZOO_KEEPERS" -u "$USER" -p "$PASS" --table "$TABLE" --min 
"$MIN" --max "$MAX" --maxColF "$MAX_CF" --maxColQ "$MAX_CQ" --batchMemory 
"$MAX_MEM" --batchLatency "$MAX_LATENCY" --batchThreads "$NUM_THREADS" 
--maxMappers "$VERIFY_MAX_MAPS"
+"$ACCUMULO_HOME/contrib/tool.sh" "$SERVER_LIBJAR" 
org.apache.accumulo.test.continuous.ContinuousMoru -libjars "$SERVER_LIBJAR" -i 
"$INSTANCE_NAME" -z "$ZOO_KEEPERS" -u "$USER" -p "$PASS" --table "$TABLE" --min 
"$MIN" --max "$MAX" --maxColF "$MAX_CF" --maxColQ "$MAX_CQ" --batchMemory 
"$MAX_MEM" --batchLatency "$MAX_LATENCY" --batchThreads "$NUM_THREADS" 
--maxMappers "$VERIFY_MAX_MAPS"
 

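These continuous-test wrappers take no arguments; they read INSTANCE_NAME, ZOO_KEEPERS, the credentials, and the table settings from the continuous test configuration (CONTINUOUS_CONF_DIR defaults to the script's own directory). A typical invocation, assuming the environment file has been filled in:

    $ cd test/system/continuous
    $ ./run-moru.sh
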
http://git-wip-us.apache.org/repos/asf/accumulo/blob/ab0d6fc3/test/system/continuous/run-verify.sh
----------------------------------------------------------------------
diff --git a/test/system/continuous/run-verify.sh b/test/system/continuous/run-verify.sh
index 6d2d048..aa56643 100755
--- a/test/system/continuous/run-verify.sh
+++ b/test/system/continuous/run-verify.sh
@@ -39,4 +39,4 @@ AUTH_OPT="";
 SCAN_OPT=--offline
 [[ $SCAN_OFFLINE == false ]] && SCAN_OPT=
 
-"$ACCUMULO_HOME/lib/scripts/tool.sh" "$SERVER_LIBJAR" 
org.apache.accumulo.test.continuous.ContinuousVerify 
-Dmapreduce.job.reduce.slowstart.completedmaps=0.95 -libjars "$SERVER_LIBJAR" 
"$AUTH_OPT" -i "$INSTANCE_NAME" -z "$ZOO_KEEPERS" -u "$USER" -p "$PASS" --table 
"$TABLE" --output "$VERIFY_OUT" --maxMappers "$VERIFY_MAX_MAPS" --reducers 
"$VERIFY_REDUCERS" "$SCAN_OPT"
+"$ACCUMULO_HOME/contrib/tool.sh" "$SERVER_LIBJAR" 
org.apache.accumulo.test.continuous.ContinuousVerify 
-Dmapreduce.job.reduce.slowstart.completedmaps=0.95 -libjars "$SERVER_LIBJAR" 
"$AUTH_OPT" -i "$INSTANCE_NAME" -z "$ZOO_KEEPERS" -u "$USER" -p "$PASS" --table 
"$TABLE" --output "$VERIFY_OUT" --maxMappers "$VERIFY_MAX_MAPS" --reducers 
"$VERIFY_REDUCERS" "$SCAN_OPT"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ab0d6fc3/test/system/upgrade_test.sh
----------------------------------------------------------------------
diff --git a/test/system/upgrade_test.sh b/test/system/upgrade_test.sh
index 0a258a4..a5c7863 100755
--- a/test/system/upgrade_test.sh
+++ b/test/system/upgrade_test.sh
@@ -54,7 +54,7 @@ fi
 
 echo "==== Starting Current ==="
 
-"$CURR/bin/accumulo-cluster start"
+"$CURR/bin/accumulo-cluster" start
 "$CURR/bin/accumulo" org.apache.accumulo.test.VerifyIngest --size 50 
--timestamp 1 --random 56 --rows 400000 --start 0 --cols 1 -i $INSTANCE -u root 
-p secret
 echo "compact -t test_ingest -w" | $CURR/bin/accumulo shell -u root -p secret
 "$CURR/bin/accumulo" org.apache.accumulo.test.VerifyIngest --size 50 
--timestamp 1 --random 56 --rows 400000 --start 0 --cols 1 -i $INSTANCE -u root 
-p secret
@@ -65,13 +65,13 @@ echo "compact -t test_ingest -w" | $CURR/bin/accumulo shell -u root -p secret
 echo "compact -t test_ingest -w" | $CURR/bin/accumulo shell -u root -p secret
 "$CURR/bin/accumulo" org.apache.accumulo.test.VerifyIngest --size 50 
--timestamp 2 --random 57 --rows 500000 --start 0 --cols 1 -i $INSTANCE -u root 
-p secret
 
-"$CURR/bin/accumulo-cluster stop"
-"$CURR/bin/accumulo-cluster start"
+"$CURR/bin/accumulo-cluster" stop
+"$CURR/bin/accumulo-cluster" start
 
 "$CURR/bin/accumulo" org.apache.accumulo.test.VerifyIngest --size 50 
--timestamp 2 --random 57 --rows 500000 --start 0 --cols 1 -i $INSTANCE -u root 
-p secret
 
 pkill -9 -f accumulo.start
-"$CURR/bin/accumulo-cluster start"
+"$CURR/bin/accumulo-cluster" start
 
 "$CURR/bin/accumulo" org.apache.accumulo.test.VerifyIngest --size 50 
--timestamp 2 --random 57 --rows 500000 --start 0 --cols 1 -i $INSTANCE -u root 
-p secret
 
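The accumulo-cluster quoting fix above is a real bug fix, not a style change: quoting the command and its argument together makes the shell look for a single executable whose file name literally contains a space. A minimal illustration (paths are just examples):

    $ cmd="/bin/echo hello"
    $ "$cmd"            # fails: no such file or directory: '/bin/echo hello'
    $ /bin/echo hello   # runs /bin/echo with the argument 'hello'
    hello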
