This is an automated email from the ASF dual-hosted git repository.

jmark99 pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo-examples.git


The following commit(s) were added to refs/heads/main by this push:
     new ef2e04e  Avoid checking Accumulo table exists before creation (#74)
ef2e04e is described below

commit ef2e04e78cc08ce1d1a4b3f67947e5fe1f874c89
Author: Mark Owens <jmar...@apache.org>
AuthorDate: Fri May 7 08:16:44 2021 -0400

    Avoid checking Accumulo table exists before creation (#74)
    
    The initial focus of this ticket was to remove the check for table existence and instead create the table, catching and ignoring the TableExistsException.
    
    After initial work (see previous comments), it was decided to update the code to behave more realistically. Primarily, the exceptions are no longer ignored; the code at least alerts the user that the table already existed and lets the user decide whether to remove the table and re-run the example.
    
    Another concern was the possibility of interfering with an existing table on a user's system.
    
    The primary changes then became updating table creation and providing feedback via logs when a prior table already existed. To prevent table name collisions, the examples were modified to make use of Accumulo namespaces. The classes and documentation were updated to create an 'examples' namespace wherein all the tables are created.
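    
    A minimal sketch of that creation pattern, assuming the public Accumulo
    client API and slf4j logging (the class and method names below are
    illustrative; the actual Common class in this commit may differ):
    
        import org.apache.accumulo.core.client.AccumuloClient;
        import org.apache.accumulo.core.client.AccumuloException;
        import org.apache.accumulo.core.client.AccumuloSecurityException;
        import org.apache.accumulo.core.client.NamespaceExistsException;
        import org.apache.accumulo.core.client.TableExistsException;
        import org.slf4j.Logger;
        import org.slf4j.LoggerFactory;
    
        public class CreateTableSketch {
          private static final Logger log = LoggerFactory.getLogger(CreateTableSketch.class);
          public static final String NAMESPACE = "examples";
    
          // Create the shared 'examples' namespace and the requested table,
          // logging the "already exists" cases instead of silently ignoring them.
          public static void createTable(AccumuloClient client, String tableName)
              throws AccumuloException, AccumuloSecurityException {
            try {
              client.namespaceOperations().create(NAMESPACE);
            } catch (NamespaceExistsException e) {
              // Expected on re-runs; all examples share this namespace.
              log.info("Namespace '{}' already exists", NAMESPACE);
            }
            try {
              client.tableOperations().create(NAMESPACE + "." + tableName);
            } catch (TableExistsException e) {
              // Alert the user so they can decide whether to drop the table
              // and re-run the example.
              log.warn("Table '{}.{}' already exists", NAMESPACE, tableName);
            }
          }
        }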
    
    Along the way, several other smaller tweaks were also made. Some of these are listed below.
    
    * The process of table creation was refactored into a Common class. All examples now use a method in that class to create both the namespace and table name. A couple of constants used throughout the example classes are defined there as well.
    * The bloom classes now share a couple of helper methods that remove some redundant code.
    * An unneeded import was removed from the CharacterHistogram.java class.
    * Most uses of System.out.println were replaced with logging.
    * Updated SequentialBatchWriter to exit if the table required for scanning does not exist (see the sketch after this list).
    * A majority of the documentation was updated to include the creation of the necessary 'examples' namespace.
    * The config command was updated to use the table.class.loader.context property rather than the deprecated table.classpath.context.
    * Updated the constraints example to work with the new location of the constraint classes in Accumulo.
    * Updated the filedata documentation to note that the ChunkCombiner class must be available in the Accumulo lib directory or somewhere on the classpath in order to scan the created examples.dataTable.
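    
    A hedged sketch of the missing-table guard mentioned in the
    SequentialBatchWriter bullet above (the class name, method name, and log
    message are illustrative; the commit's actual code may differ):
    
        import org.apache.accumulo.core.client.AccumuloClient;
        import org.slf4j.Logger;
        import org.slf4j.LoggerFactory;
    
        public class MissingTableGuard {
          private static final Logger log = LoggerFactory.getLogger(MissingTableGuard.class);
    
          // Exit early when a required table does not exist yet, rather than
          // failing later with a TableNotFoundException mid-example.
          public static void requireTable(AccumuloClient client, String table) {
            if (!client.tableOperations().exists(table)) {
              log.error("Table {} does not exist; run the ingest step first", table);
              System.exit(1);
            }
          }
        }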
    
    Closes #13
---
 .gitignore                                         |  1 +
 docs/batch.md                                      |  4 +-
 docs/bloom.md                                      | 14 ++--
 docs/bulkIngest.md                                 |  2 +-
 docs/classpath.md                                  | 27 ++++----
 docs/combiner.md                                   | 29 ++++----
 docs/compactionStrategy.md                         | 29 ++++----
 docs/constraints.md                                |  2 -
 docs/deleteKeyValuePair.md                         | 57 +++++++++-------
 docs/dirlist.md                                    | 16 ++---
 docs/export.md                                     | 37 ++++++-----
 docs/filedata.md                                   | 13 ++--
 docs/filter.md                                     | 41 ++++++------
 docs/helloworld.md                                 |  4 +-
 docs/isolation.md                                  |  6 +-
 docs/regex.md                                      | 12 ++--
 docs/reservations.md                               |  9 +--
 docs/rgbalancer.md                                 | 19 +++---
 docs/rowhash.md                                    | 13 ++--
 docs/sample.md                                     | 71 ++++++++++----------
 docs/shard.md                                      | 13 ++--
 docs/tabletofile.md                                | 22 +++----
 docs/terasort.md                                   |  9 ++-
 docs/visibility.md                                 | 77 +++++++++++-----------
 docs/wordcount.md                                  | 12 ++--
 .../java/org/apache/accumulo/spark/CopyPlus5K.java | 18 ++++-
 .../java/org/apache/accumulo/examples/Common.java  | 51 ++++++++++++++
 .../accumulo/examples/bloom/BloomBatchScanner.java | 27 ++++----
 .../accumulo/examples/bloom/BloomCommon.java       | 13 ++++
 .../accumulo/examples/bloom/BloomFilters.java      | 60 +++++++++--------
 .../examples/bloom/BloomFiltersNotFound.java       | 38 ++++++-----
 .../examples/client/RandomBatchScanner.java        | 29 ++++----
 .../accumulo/examples/client/ReadWriteExample.java | 45 +++++++------
 .../accumulo/examples/client/RowOperations.java    | 36 ++++------
 .../examples/client/SequentialBatchWriter.java     | 19 +++---
 .../accumulo/examples/client/TracingExample.java   |  3 +-
 .../accumulo/examples/combiner/StatsCombiner.java  |  2 +-
 .../constraints/AlphaNumKeyConstraint.java         | 47 +++++++------
 .../examples/constraints/ConstraintsCommon.java    |  9 +++
 .../examples/constraints/MaxMutationSize.java      | 30 ++++-----
 .../constraints/NumericValueConstraint.java        | 43 +++++-------
 .../apache/accumulo/examples/dirlist/Ingest.java   | 44 ++++++++-----
 .../examples/filedata/CharacterHistogram.java      |  1 -
 .../accumulo/examples/filedata/FileDataIngest.java | 18 +++--
 .../accumulo/examples/helloworld/Insert.java       | 12 ++--
 .../apache/accumulo/examples/helloworld/Read.java  |  3 +-
 .../examples/isolation/InterferenceTest.java       | 25 ++++---
 .../accumulo/examples/mapreduce/NGramIngest.java   |  3 +-
 .../accumulo/examples/mapreduce/WordCount.java     | 12 ++--
 .../examples/mapreduce/bulk/BulkIngestExample.java | 14 ++--
 .../examples/mapreduce/bulk/SetupTable.java        | 21 +++---
 .../examples/mapreduce/bulk/VerifyIngest.java      | 19 +++---
 .../accumulo/examples/sample/SampleExample.java    | 15 +++--
 .../apache/accumulo/examples/shard/Reverse.java    |  4 +-
 54 files changed, 672 insertions(+), 528 deletions(-)

diff --git a/.gitignore b/.gitignore
index 614659a..fca81e8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,4 @@
 /.idea
 /examples.conf
 /conf/env.sh
+/conf/log4j.properties
diff --git a/docs/batch.md b/docs/batch.md
index 305a732..af9205e 100644
--- a/docs/batch.md
+++ b/docs/batch.md
@@ -30,8 +30,8 @@ Run `SequentialBatchWriter` to add 10000 entries with random 
50 bytes values to
 Verify data was ingested by scanning the table using the Accumulo shell:
 
     $ accumulo shell
-    root@instance> table batch
-    root@instance batch> scan
+    root@instance> table examples.batch
+    root@instance examples.batch> scan
 
 Run `RandomBatchScanner` to perform 1000 random queries and verify the results.
 
diff --git a/docs/bloom.md b/docs/bloom.md
index 528bbb5..8a38df5 100644
--- a/docs/bloom.md
+++ b/docs/bloom.md
@@ -39,10 +39,10 @@ likely contain entries for the query, all files will be 
interrogated.
     
     $ ./bin/runex bloom.BloomBatchScanner
 
-    Scanning bloom_test1 with seed 7
+    Scanning examples.bloom_test1 with seed 7
     Scan finished! 282.49 lookups/sec, 1.77 secs, 500 results
     All expected rows were scanned
-    Scanning bloom_test2 with seed 7
+    Scanning examples.bloom_test2 with seed 7
     Scan finished! 704.23 lookups/sec, 0.71 secs, 500 results
     All expected rows were scanned
 
@@ -52,8 +52,8 @@ you will need the table ID, which can be found with the 
following shell command.
     $ accumulo shell -u username -p password -e 'tables -l'
     accumulo.metadata    =>        !0
     accumulo.root        =>        +r
-    bloom_test1          =>         2
-    bloom_test2          =>         3
+    examples.bloom_test1 =>         2
+    examples.bloom_test2 =>         3
     trace                =>         1
 
 So the table id for bloom_test2 is 3. The command below shows what files this
@@ -103,10 +103,10 @@ One million random values initialized with seed 7 are 
inserted into each table.
 Once the flush completes, 500 random queries are done against each table but 
with a different seed.
 Even when nothing is found the lookups are faster against the table with the 
bloom filters.
 
-    Writing data to bloom_test3 and bloom_test4 (bloom filters enabled)
-    Scanning bloom_test3 with seed 8
+    Writing data to examples.bloom_test3 and examples.bloom_test4 (bloom 
filters enabled)
+    Scanning examples.bloom_test3 with seed 8
     Scan finished! 780.03 lookups/sec, 0.64 secs, 0 results
     Did not find 500
-    Scanning bloom_test4 with seed 8
+    Scanning examples.bloom_test4 with seed 8
     Scan finished! 1736.11 lookups/sec, 0.29 secs, 0 results
     Did not find 500
diff --git a/docs/bulkIngest.md b/docs/bulkIngest.md
index 6edee37..28cd3c8 100644
--- a/docs/bulkIngest.md
+++ b/docs/bulkIngest.md
@@ -20,7 +20,7 @@ This is an example of how to bulk ingest data into Accumulo 
using mapReduce.
 
 This tutorial uses the following Java classes.
 
- * [SetupTable.java] - creates the table, 'test_bulk', and sets two split 
points.
+ * [SetupTable.java] - creates the table, 'examples.test_bulk', and sets two 
split points.
  * [BulkIngestExample.java] - creates some data to ingest and then ingests the 
data using mapReduce
  * [VerifyIngest.java] - checks that the data was ingested
  
diff --git a/docs/classpath.md b/docs/classpath.md
index 37526eb..8e8cc28 100644
--- a/docs/classpath.md
+++ b/docs/classpath.md
@@ -33,40 +33,41 @@ is located within the Accumulo source distribution.
 
 Execute following in Accumulo shell to setup classpath context
 
-    root@test15> config -s general.vfs.context.classpath.cx1=hdfs://<namenode 
host>:<namenode port>/user1/lib/[^.].*.jar
+    root@uno> config -s general.vfs.context.classpath.cx1=hdfs://<namenode 
host>:<namenode port>/user1/lib/[^.].*.jar
 
-Create a table
+Create a namespace and table
 
-    root@test15> createtable nofoo
+    root@uno> createnamespace examples
+    root@uno> createtable examples.nofoo
 
 The following command makes this table use the configured classpath context
 
-    root@test15 nofoo> config -t nofoo -s table.classpath.context=cx1
+    root@uno examples.nofoo> config -t examples.nofoo -s 
table.class.loader.context=cx1
 
 The following command configures an iterator that's in FooFilter.jar
 
-    root@test15 nofoo> setiter -n foofilter -p 10 -scan -minc -majc -class 
org.apache.accumulo.test.FooFilter
+    root@uno examples.nofoo> setiter -n foofilter -p 10 -scan -minc -majc 
-class org.apache.accumulo.test.FooFilter
     Filter accepts or rejects each Key/Value pair
     ----------> set FooFilter parameter negate, default false keeps k/v that 
pass accept method, true rejects k/v that pass accept method: false
 
 The commands below show the filter is working.
 
-    root@test15 nofoo> insert foo1 f1 q1 v1
-    root@test15 nofoo> insert noo1 f1 q1 v2
-    root@test15 nofoo> scan
+    root@uno examples.nofoo> insert foo1 f1 q1 v1
+    root@uno examples.nofoo> insert noo1 f1 q1 v2
+    root@uno examples.nofoo> scan
     noo1 f1:q1 []    v2
-    root@test15 nofoo>
+    root@uno examples.nofoo>
 
 Below, an attempt is made to add the FooFilter to a table that's not configured
 to use the classpath context cx1. This fails until the table is configured to
 use cx1.
 
-    root@test15 nofoo> createtable nofootwo
-    root@test15 nofootwo> setiter -n foofilter -p 10 -scan -minc -majc -class 
org.apache.accumulo.test.FooFilter
+    root@uno examples.nofoo> createtable examples.nofootwo
+    root@uno examples.nofootwo> setiter -n foofilter -p 10 -scan -minc -majc 
-class org.apache.accumulo.test.FooFilter
         2013-05-03 12:49:35,943 [shell.Shell] ERROR: 
org.apache.accumulo.shell.ShellCommandException: Command could 
     not be initialized (Unable to load org.apache.accumulo.test.FooFilter; 
class not found.)
-    root@test15 nofootwo> config -t nofootwo -s table.classpath.context=cx1
-    root@test15 nofootwo> setiter -n foofilter -p 10 -scan -minc -majc -class 
org.apache.accumulo.test.FooFilter
+    root@uno examples.nofootwo> config -t examples.nofootwo -s table.class.loader.context=cx1
+    root@uno examples.nofootwo> setiter -n foofilter -p 10 -scan -minc -majc 
-class org.apache.accumulo.test.FooFilter
     Filter accepts or rejects each Key/Value pair
     ----------> set FooFilter parameter negate, default false keeps k/v that 
pass accept method, true rejects k/v that pass accept method: false
 
diff --git a/docs/combiner.md b/docs/combiner.md
index 48dc7e3..7ee9cd1 100644
--- a/docs/combiner.md
+++ b/docs/combiner.md
@@ -29,43 +29,44 @@ tar distribution.
 
     Shell - Apache Accumulo Interactive Shell
     -
-    - version: 1.5.0
+    - version: 2.1.0-SNAPSHOT
     - instance name: instance
     - instance id: 00000000-0000-0000-0000-000000000000
     -
     - type 'help' for a list of available commands
     -
-    username@instance> createtable runners
-    username@instance runners> setiter -t runners -p 10 -scan -minc -majc -n 
decStats -class org.apache.accumulo.examples.combiner.StatsCombiner
+    username@instance> createnamespace examples
+    username@instance> createtable examples.runners
+    username@instance examples.runners> setiter -t examples.runners -p 10 -scan -minc -majc -n decStats -class org.apache.accumulo.examples.combiner.StatsCombiner
     Combiner that keeps track of min, max, sum, and count
     ----------> set StatsCombiner parameter all, set to true to apply Combiner 
to every column, otherwise leave blank. if true, columns option will be 
ignored.: 
     ----------> set StatsCombiner parameter columns, <col fam>[:<col 
qual>]{,<col fam>[:<col qual>]} escape non aplhanum chars using %<hex>.: stat
     ----------> set StatsCombiner parameter reduceOnFullCompactionOnly, If 
true, only reduce on full major compactions.  Defaults to false. : 
     ----------> set StatsCombiner parameter radix, radix/base of the numbers: 
10
-    username@instance runners> setiter -t runners -p 11 -scan -minc -majc -n 
hexStats -class org.apache.accumulo.examples.combiner.StatsCombiner
+    username@instance examples.runners> setiter -t examples.runners -p 11 -scan -minc -majc -n hexStats -class org.apache.accumulo.examples.combiner.StatsCombiner
     Combiner that keeps track of min, max, sum, and count
     ----------> set StatsCombiner parameter all, set to true to apply Combiner 
to every column, otherwise leave blank. if true, columns option will be 
ignored.: 
     ----------> set StatsCombiner parameter columns, <col fam>[:<col 
qual>]{,<col fam>[:<col qual>]} escape non-alphanum chars using %<hex>.: hstat
     ----------> set StatsCombiner parameter reduceOnFullCompactionOnly, If 
true, only reduce on full major compactions.  Defaults to false. : 
     ----------> set StatsCombiner parameter radix, radix/base of the numbers: 
16
-    username@instance runners> insert 123456 name first Joe
-    username@instance runners> insert 123456 stat marathon 240
-    username@instance runners> scan
+    username@instance examples.runners> insert 123456 name first Joe
+    username@instance examples.runners> insert 123456 stat marathon 240
+    username@instance examples.runners> scan
     123456 name:first []    Joe
     123456 stat:marathon []    240,240,240,1
-    username@instance runners> insert 123456 stat marathon 230
-    username@instance runners> insert 123456 stat marathon 220
-    username@instance runners> scan
+    username@instance examples.runners> insert 123456 stat marathon 230
+    username@instance examples.runners> insert 123456 stat marathon 220
+    username@instance examples.runners> scan
     123456 name:first []    Joe
     123456 stat:marathon []    220,240,690,3
-    username@instance runners> insert 123456 hstat virtualMarathon 6a
-    username@instance runners> insert 123456 hstat virtualMarathon 6b
-    username@instance runners> scan
+    username@instance examples.runners> insert 123456 hstat virtualMarathon 6a
+    username@instance examples.runners> insert 123456 hstat virtualMarathon 6b
+    username@instance examples.runners> scan
     123456 hstat:virtualMarathon []    6a,6b,d5,2
     123456 name:first []    Joe
     123456 stat:marathon []    220,240,690,3
 
-In this example a table is created and the example stats combiner is applied to
+In this example a table is created, and the example stats combiner is applied 
to
 the column family stat and hstat. The stats combiner computes min,max,sum, and
 count. It can be configured to use a different base or radix. In the example
 above the column family stat is configured for base 10 and the column family
diff --git a/docs/compactionStrategy.md b/docs/compactionStrategy.md
index 8b63ae8..594b28d 100644
--- a/docs/compactionStrategy.md
+++ b/docs/compactionStrategy.md
@@ -34,7 +34,8 @@ native libraries built with snappy in order to use snappy 
compression.
 To begin, run the command to create a table for testing.
 
 ```bash
-$ accumulo shell -u <username> -p <password> -e "createtable test1"
+$ accumulo shell -u <username> -p <password> -e "createnamespace examples"
+$ accumulo shell -u <username> -p <password> -e "createtable examples.test1"
 ```
 
 The commands below will configure the BasicCompactionStrategy to:
@@ -43,24 +44,24 @@ The commands below will configure the 
BasicCompactionStrategy to:
  - Compact files less than 100M using snappy.
  
 ```bash
- $ accumulo shell -u <username> -p <password> -e "config -t test1 -s 
table.file.compress.type=snappy"
- $ accumulo shell -u <username> -p <password> -e "config -t test1 -s 
table.majc.compaction.strategy=org.apache.accumulo.tserver.compaction.strategies.BasicCompactionStrategy"
- $ accumulo shell -u <username> -p <password> -e "config -t test1 -s 
table.majc.compaction.strategy.opts.filter.size=250M"
- $ accumulo shell -u <username> -p <password> -e "config -t test1 -s 
table.majc.compaction.strategy.opts.large.compress.threshold=100M"
- $ accumulo shell -u <username> -p <password> -e "config -t test1 -s 
table.majc.compaction.strategy.opts.large.compress.type=gz"
+ $ accumulo shell -u <username> -p <password> -e "config -t examples.test1 -s 
table.file.compress.type=snappy"
+ $ accumulo shell -u <username> -p <password> -e "config -t examples.test1 -s table.majc.compaction.strategy=org.apache.accumulo.tserver.compaction.strategies.BasicCompactionStrategy"
+ $ accumulo shell -u <username> -p <password> -e "config -t examples.test1 -s table.majc.compaction.strategy.opts.filter.size=250M"
+ $ accumulo shell -u <username> -p <password> -e "config -t examples.test1 -s table.majc.compaction.strategy.opts.large.compress.threshold=100M"
+ $ accumulo shell -u <username> -p <password> -e "config -t examples.test1 -s table.majc.compaction.strategy.opts.large.compress.type=gz"
 ```
 
 Generate some data and files in order to test the strategy:
 
 ```bash
-$ ./bin/runex client.SequentialBatchWriter -t test1 --start 0 --num 10000 
--size 50
-$ accumulo shell -u <username> -p <password> -e "flush -t test1"
-$ ./bin/runex client.SequentialBatchWriter -t test1 --start 0 --num 11000 
--size 50
-$ accumulo shell -u <username> -p <password> -e "flush -t test1"
-$ ./bin/runex client.SequentialBatchWriter -t test1 --start 0 --num 12000 
--size 50
-$ accumulo shell -u <username> -p <password> -e "flush -t test1"
-$ ./bin/runex client.SequentialBatchWriter -t test1 --start 0 --num 13000 
--size 50
-$ accumulo shell -u <username> -p <password> -e "flush -t test1"
+$ ./bin/runex client.SequentialBatchWriter -t examples.test1 --start 0 --num 
10000 --size 50
+$ accumulo shell -u <username> -p <password> -e "flush -t examples.test1"
+$ ./bin/runex client.SequentialBatchWriter -t examples.test1 --start 0 --num 
11000 --size 50
+$ accumulo shell -u <username> -p <password> -e "flush -t examples.test1"
+$ ./bin/runex client.SequentialBatchWriter -t examples.test1 --start 0 --num 
12000 --size 50
+$ accumulo shell -u <username> -p <password> -e "flush -t examples.test1"
+$ ./bin/runex client.SequentialBatchWriter -t examples.test1 --start 0 --num 
13000 --size 50
+$ accumulo shell -u <username> -p <password> -e "flush -t examples.test1"
 ```
 
 View the tserver log in <accumulo_home>/logs for the compaction and find the 
name of the <rfile> that was compacted for your table. Print info about this 
file using the PrintInfo tool:
diff --git a/docs/constraints.md b/docs/constraints.md
index 71803bc..8231518 100644
--- a/docs/constraints.md
+++ b/docs/constraints.md
@@ -22,8 +22,6 @@ This tutorial uses the following Java classes, which can be 
found in org.apache.
  * [NumericValueConstraint.java] - a constraint that requires numeric string 
values
  * [MaxMutationSize.java] - a constraint that limits the size of mutations 
accepted into a table
 
-Remember to copy the accumulo-examples-\*.jar to Accumulo's 'lib/ext' 
directory.
-
 AlphaNumKeyConstraint prevents insertion of keys with characters not between 
aA and zZ or 0 to 9.  
 NumericValueConstraint prevents insertion of values with characters not 
between 0 and 9. The examples create mutations
 that violate the constraint, throwing an exception.
diff --git a/docs/deleteKeyValuePair.md b/docs/deleteKeyValuePair.md
index 87315a3..b96cd29 100644
--- a/docs/deleteKeyValuePair.md
+++ b/docs/deleteKeyValuePair.md
@@ -20,28 +20,29 @@ This example shows how Accumulo internals handle removing a 
key-value pair
 
 ```
 $ /path/to/accumulo shell -u username -p secret
-username@instance> createtable deleteKeyValuePair
-username@instance deleteKeyValuePair> insert 567890 name first Joe
-username@instance deleteKeyValuePair> insert 567890 name last Smith
-username@instance deleteKeyValuePair> insert 567890 address city Columbia
+username@instance> createnamespace examples
+username@instance> createtable examples.deleteKeyValuePair
+username@instance examples.deleteKeyValuePair> insert 567890 name first Joe
+username@instance examples.deleteKeyValuePair> insert 567890 name last Smith
+username@instance examples.deleteKeyValuePair> insert 567890 address city 
Columbia
 ```
 
 ```
-username@instance deleteKeyValuePair> scan
+username@instance examples.deleteKeyValuePair> scan
 567890 address:city []    Columbia
 567890 name:first []    Joe
 567890 name:last []    Smith
 ```
 
 ```
-username@instance deleteKeyValuePair> flush -w
+username@instance examples.deleteKeyValuePair> flush -w
 2019-04-18 11:01:47,444 [shell.Shell] INFO : Flush of table deleteKeyValuePair 
completed.
 ```
 
-Get the deleteKeyValuePair table id.
+Get the examples.deleteKeyValuePair table id.
 
 ```
-username@instance deleteKeyValuePair> tables -l
+username@instance examples.deleteKeyValuePair> tables -l
 accumulo.metadata    =>        !0
 accumulo.replication =>      +rep
 accumulo.root        =>        +r
@@ -51,7 +52,7 @@ deleteKeyValuePair   =>        1t
 Scan accumulo.metadata table to see the list of RFiles Accumulo is currently 
using.
 
 ```
-username@instance deleteKeyValuePair> scan -t accumulo.metadata -c file -r 1t<
+username@instance examples.deleteKeyValuePair> scan -t accumulo.metadata -c 
file -r 1t<
 1t< file:hdfs://localhost:8020/accumulo/tables/1t/default_tablet/F00007em.rf
 ```
 
@@ -59,6 +60,7 @@ View the contents of RFile and verify each key-value pair's 
deletion flag is fal
 
 ```
 $ /path/to/accumulo rfile-info -d 
hdfs://localhost/accumulo/tables/1t/default_tablet/F00007em.rf
+RFile Version            : 8
 
 Locality group           : <DEFAULT>
     Num   blocks           : 1
@@ -77,27 +79,29 @@ Meta block     : RFile.index
       Raw size             : 141 bytes
       Compressed size      : 97 bytes
       Compression type     : gz
-567890 address:city [] 1555418057811 false -> Columbia
-567890 name:first [] 1555418067848 false -> Joe
-567890 name:last [] 1555418063052 false -> Smith
+      
+567890 address:city [] 1618581304723 false -> Columbia
+567890 name:first [] 1618581291527 false -> Joe
+567890 name:last [] 1618581297272 false -> Smith
 ```
 Delete a key-value pair and view a newly created RFile to verify the deletion 
flag is true.
 
 ```
 $ /path/to/accumulo shell -u username -p secret
-username@instance> table deleteKeyValuePair
-username@instance deleteKeyValuePair> delete 567890 name first
-username@instance deleteKeyValuePair> flush -w
+username@instance> table examples.deleteKeyValuePair
+username@instance examples.deleteKeyValuePair> delete 567890 name first
+username@instance examples.deleteKeyValuePair> flush -w
 ```
 
 ```
-username@instance deleteKeyValuePair> scan -t accumulo.metadata -c file -r 1t<
+username@instance examples.deleteKeyValuePair> scan -t accumulo.metadata -c 
file -r 1t<
 1t< file:hdfs://localhost:8020/accumulo/tables/1t/default_tablet/F00007em.rf
 1t< file:hdfs://localhost:8020/accumulo/tables/1t/default_tablet/F00007fq.rf
 ```
 
 ```
 $ /path/to/accumulo rfile-info -d 
hdfs://localhost/accumulo/tables/1t/default_tablet/F00007fq.rf
+RFile Version            : 8
 
 Locality group           : <DEFAULT>
     Num   blocks           : 1
@@ -116,26 +120,28 @@ Meta block     : RFile.index
       Raw size             : 121 bytes
       Compressed size      : 68 bytes
       Compression type     : gz
-
-567890 name:first [] 1555419184531 true ->
+      
+567890 name:first [] 1618581499491 true ->
 ```
 
 Compact the RFiles and verify the key-value pair was removed.  The new RFile 
will start with 'A'.
 
 ```
 $ /path/to/accumulo shell -u username -p secret
-username@instance deleteKeyValuePair> compact -t deleteKeyValuePair -w
+username@instance> compact -t examples.deleteKeyValuePair -w
 2019-04-17 08:17:15,468 [shell.Shell] INFO : Compacting table ...
-2019-04-17 08:17:16,143 [shell.Shell] INFO : Compaction of table 
deleteKeyValuePair completed for given range
+2019-04-17 08:17:16,143 [shell.Shell] INFO : Compaction of table 
examples.deleteKeyValuePair 
+completed for given range
 ```
 
 ```
-username@instance deleteKeyValuePair> scan -t accumulo.metadata -c file -r 1t<
+username@instance> scan -t accumulo.metadata -c file -r 1t<
 1t< file:hdfs://localhost:8020/accumulo/tables/1t/default_tablet/A00007g1.rf
 ```
 
  ```
 $ /path/to/accumulo rfile-info -v 
hdfs://localhost/accumulo/tables/1t/default_tablet/A00007g1.rf
+RFile Version            : 8
 
 Locality group           : <DEFAULT>
     Num   blocks           : 1
@@ -154,7 +160,10 @@ Meta block     : RFile.index
       Raw size             : 141 bytes
       Compressed size      : 96 bytes
       Compression type     : gz
-
-567890 address:city [] 1555418057811 false -> Columbia
-567890 name:last [] 1555418063052 false -> Smith
+      
+Locality Group: <DEFAULT>
+Visibility                 Number of keys          Percent of keys      Number of blocks        Percent of blocks
+                                2                       100.00%                 1                  100.00%
+Number of keys: 2
+     
 ```
diff --git a/docs/dirlist.md b/docs/dirlist.md
index 2b653cf..159f7aa 100644
--- a/docs/dirlist.md
+++ b/docs/dirlist.md
@@ -43,26 +43,26 @@ To browse the data ingested, use Viewer.java. Be sure to 
give the "username" use
 
 then run the Viewer:
 
-    $ ./bin/runex dirlist.Viewer -t dirTable --dataTable dataTable --auths 
exampleVis --path /local/username/workspace
+    $ ./bin/runex dirlist.Viewer -t examples.dirTable --dataTable 
examples.dataTable --auths exampleVis --path /local/username/workspace
 
 To list the contents of specific directories, use QueryUtil.java.
 
-    $ ./bin/runex dirlist.QueryUtil -t dirTable --auths exampleVis --path 
/local/username
-    $ ./bin/runex dirlist.QueryUtil -t dirTable --auths exampleVis --path 
/local/username/workspace
+    $ ./bin/runex dirlist.QueryUtil -t examples.dirTable --auths exampleVis 
--path /local/username
+    $ ./bin/runex dirlist.QueryUtil -t examples.dirTable --auths exampleVis 
--path /local/username/workspace
 
 To perform searches on file or directory names, also use QueryUtil.java. 
Search terms must contain no more than one wild card and cannot contain "/".
 *Note* these queries run on the _indexTable_ table instead of the dirTable 
table.
 
-    $ ./bin/runex dirlist.QueryUtil -t indexTable --auths exampleVis --path 
filename --search
-    $ ./bin/runex dirlist.QueryUtil -t indexTable --auths exampleVis --path 
'filename*' --search
-    $ ./bin/runex dirlist.QueryUtil -t indexTable --auths exampleVis --path 
'*jar' --search
-    $ ./bin/runex dirlist.QueryUtil -t indexTable --auths exampleVis --path 
'filename*jar' --search
+    $ ./bin/runex dirlist.QueryUtil -t examples.indexTable --auths exampleVis 
--path filename --search
+    $ ./bin/runex dirlist.QueryUtil -t examples.indexTable --auths exampleVis 
--path 'filename*' --search
+    $ ./bin/runex dirlist.QueryUtil -t examples.indexTable --auths exampleVis 
--path '*jar' --search
+    $ ./bin/runex dirlist.QueryUtil -t examples.indexTable --auths exampleVis 
--path 'filename*jar' --search
 
 To count the number of direct children (directories and files) and descendants 
(children and children's descendants, directories and files), run the FileCount 
over the dirTable table.
 The results are written back to the same table. FileCount reads from and 
writes to Accumulo. This requires scan authorizations for the read and a 
visibility for the data written.
 In this example, the authorizations and visibility are set to the same value, 
exampleVis. See the [visibility example][vis] for more information on 
visibility and authorizations.
 
-    $ ./bin/runex dirlist.FileCount -t dirTable --auths exampleVis
+    $ ./bin/runex dirlist.FileCount -t examples.dirTable --auths exampleVis
 
 ## Directory Table
 
diff --git a/docs/export.md b/docs/export.md
index 6bab52d..87d5385 100644
--- a/docs/export.md
+++ b/docs/export.md
@@ -24,22 +24,23 @@ the table. A table must be offline to export it, and it 
should remain offline
 for the duration of the distcp. An easy way to take a table offline without
interrupting access to it is to clone it and take the clone offline.
 
-    root@test15> createtable table1
-    root@test15 table1> insert a cf1 cq1 v1
-    root@test15 table1> insert h cf1 cq1 v2
-    root@test15 table1> insert z cf1 cq1 v3
-    root@test15 table1> insert z cf1 cq2 v4
-    root@test15 table1> addsplits -t table1 b r
-    root@test15 table1> scan
+    root@test15> createnamespace examples
+    root@test15> createtable examples.table1
+    root@test15 examples.table1> insert a cf1 cq1 v1
+    root@test15 examples.table1> insert h cf1 cq1 v2
+    root@test15 examples.table1> insert z cf1 cq1 v3
+    root@test15 examples.table1> insert z cf1 cq2 v4
+    root@test15 examples.table1> addsplits -t examples.table1 b r
+    root@test15 examples.table1> scan
     a cf1:cq1 []    v1
     h cf1:cq1 []    v2
     z cf1:cq1 []    v3
     z cf1:cq2 []    v4
-    root@test15> config -t table1 -s table.split.threshold=100M
-    root@test15 table1> clonetable table1 table1_exp
-    root@test15 table1> offline table1_exp
-    root@test15 table1> exporttable -t table1_exp /tmp/table1_export
-    root@test15 table1> quit
+    root@test15 examples.table1> config -t examples.table1 -s 
table.split.threshold=100M
+    root@test15 examples.table1> clonetable examples.table1 examples.table1_exp
+    root@test15 examples.table1> offline examples.table1_exp
+    root@test15 examples.table1> exporttable -t examples.table1_exp 
/tmp/table1_export
+    root@test15 examples.table1> quit
 
 After executing the export command, a few files are created in the hdfs dir.
 One of the files is a list of files to distcp as shown below.
@@ -61,17 +62,17 @@ The Accumulo shell session below shows importing the table 
and inspecting it.
 The data, splits, config, and logical time information for the table were
 preserved.
 
-    root@test15> importtable table1_copy /tmp/table1_export_dest
-    root@test15> table table1_copy
-    root@test15 table1_copy> scan
+    root@test15> importtable examples.table1_copy /tmp/table1_export_dest
+    root@test15> table examples.table1_copy
+    root@test15 examples.table1_copy> scan
     a cf1:cq1 []    v1
     h cf1:cq1 []    v2
     z cf1:cq1 []    v3
     z cf1:cq2 []    v4
-    root@test15 table1_copy> getsplits -t table1_copy
+    root@test15 examples.table1_copy> getsplits -t examples.table1_copy
     b
     r
-    root@test15> config -t table1_copy -f split
+    root@test15> config -t examples.table1_copy -f split
     
---------+--------------------------+-------------------------------------------
     SCOPE    | NAME                     | VALUE
     
---------+--------------------------+-------------------------------------------
@@ -83,7 +84,7 @@ preserved.
     accumulo.root        =>        +r
     table1_copy          =>         5
     trace                =>         1
-    root@test15 table1_copy> scan -t accumulo.metadata -b 5 -c srv:time
+    root@test15> scan -t accumulo.metadata -b 5 -c srv:time
     5;b srv:time []    M1343224500467
     5;r srv:time []    M1343224500467
     5< srv:time []    M1343224500467
diff --git a/docs/filedata.md b/docs/filedata.md
index 7ee9d9c..a77a35d 100644
--- a/docs/filedata.md
+++ b/docs/filedata.md
@@ -32,18 +32,21 @@ This example is coupled with the [dirlist example][dirlist].
 
 If you haven't already run the [dirlist example][dirlist], ingest a file with 
FileDataIngest.
 
-    $ ./bin/runex filedata.FileDataIngest -t dataTable --auths exampleVis 
--chunk 1000 /path/to/accumulo/README.md
+    $ ./bin/runex filedata.FileDataIngest -t examples.dataTable --auths 
exampleVis --chunk 1000 /path/to/accumulo/README.md
 
-Open the accumulo shell and look at the data. The row is the MD5 hash of the 
file, which you can verify by running a command such as 'md5sum' on the file.
+Open the accumulo shell and look at the data. The row is the MD5 hash of the 
file, which you can 
+verify by running a command such as 'md5sum' on the file. Note that in order 
to scan the 
+examples.dataTable the class, 
org.apache.accumulo.examples.filedata.ChunkCombiner, must be in 
+your classpath, or the accumulo-examples-shaded.jar should be moved to the 
accumulo lib directory.
 
-    > scan -t dataTable
+    > scan -t examples.dataTable
 
 Run the CharacterHistogram MapReduce to add some information about the file.
 
-    $ ./bin/runmr filedata.CharacterHistogram -t dataTable --auths exampleVis 
--vis exampleVis
+    $ ./bin/runmr filedata.CharacterHistogram -t examples.dataTable --auths 
exampleVis --vis exampleVis
 
 Scan again to see the histogram stored in the 'info' column family.
 
-    > scan -t dataTable
+    > scan -t examples.dataTable
 
 [dirlist]: dirlist.md
diff --git a/docs/filter.md b/docs/filter.md
index 3f41265..ee34b33 100644
--- a/docs/filter.md
+++ b/docs/filter.md
@@ -26,22 +26,23 @@ Filter takes a "negate" parameter which defaults to false. 
If set to true, the
 return value of the accept method is negated, so that key/value pairs accepted
 by the method are omitted by the Filter.
 
-    username@instance> createtable filtertest
-    username@instance filtertest> setiter -t filtertest -scan -p 10 -n 
myfilter -ageoff
+    username@instance> createnamespace examples
+    username@instance> createtable examples.filtertest
+    username@instance examples.filtertest> setiter -t examples.filtertest 
-scan -p 10 -n myfilter -ageoff
     AgeOffFilter removes entries with timestamps more than <ttl> milliseconds 
old
     ----------> set AgeOffFilter parameter negate, default false keeps k/v 
that pass accept method, true rejects k/v that pass accept method:
     ----------> set AgeOffFilter parameter ttl, time to live (milliseconds): 
30000
     ----------> set AgeOffFilter parameter currentTime, if set, use the given 
value as the absolute time in milliseconds as the current time of day:
-    username@instance filtertest> scan
-    username@instance filtertest> insert foo a b c
-    username@instance filtertest> scan
+    username@instance examples.filtertest> scan
+    username@instance examples.filtertest> insert foo a b c
+    username@instance examples.filtertest> scan
     foo a:b []    c
-    username@instance filtertest>
+    username@instance examples.filtertest>
 
 ... wait 30 seconds ...
 
-    username@instance filtertest> scan
-    username@instance filtertest>
+    username@instance examples.filtertest> scan
+    username@instance examples.filtertest>
 
 Note the absence of the entry inserted more than 30 seconds ago. Since the
 scope was set to "scan", this means the entry is still in Accumulo, but is
@@ -59,21 +60,21 @@ AgeOffFilter, but any Filter can be configured by using the 
-class flag. The
 following commands show how to enable the AgeOffFilter for the minc and majc
 scopes using the -class flag, then flush and compact the table.
 
-    username@instance filtertest> setiter -t filtertest -minc -majc -p 10 -n 
myfilter -class org.apache.accumulo.core.iterators.user.AgeOffFilter
+    username@instance examples.filtertest> setiter -t examples.filtertest 
-minc -majc -p 10 -n myfilter -class 
org.apache.accumulo.core.iterators.user.AgeOffFilter
     AgeOffFilter removes entries with timestamps more than <ttl> milliseconds 
old
     ----------> set AgeOffFilter parameter negate, default false keeps k/v 
that pass accept method, true rejects k/v that pass accept method:
     ----------> set AgeOffFilter parameter ttl, time to live (milliseconds): 
30000
     ----------> set AgeOffFilter parameter currentTime, if set, use the given 
value as the absolute time in milliseconds as the current time of day:
-    username@instance filtertest> flush
-    06 10:42:24,806 [shell.Shell] INFO : Flush of table filtertest initiated...
-    username@instance filtertest> compact
-    06 10:42:36,781 [shell.Shell] INFO : Compaction of table filtertest 
started for given range
-    username@instance filtertest> flush -t filtertest -w
-    06 10:42:52,881 [shell.Shell] INFO : Flush of table filtertest completed.
-    username@instance filtertest> compact -t filtertest -w
+    username@instance examples.filtertest> flush
+    06 10:42:24,806 [shell.Shell] INFO : Flush of table examples.filtertest 
initiated...
+    username@instance examples.filtertest> compact
+    06 10:42:36,781 [shell.Shell] INFO : Compaction of table 
examples.filtertest started for given range
+    username@instance examples.filtertest> flush -t examples.filtertest -w
+    06 10:42:52,881 [shell.Shell] INFO : Flush of table examples.filtertest 
completed.
+    username@instance examples.filtertest> compact -t examples.filtertest -w
     06 10:43:00,632 [shell.Shell] INFO : Compacting table ...
-    06 10:43:01,307 [shell.Shell] INFO : Compaction of table filtertest 
completed for given range
-    username@instance filtertest>
+    06 10:43:01,307 [shell.Shell] INFO : Compaction of table 
examples.filtertest completed for given range
+    username@instance examples.filtertest>
 
 By default, flush and compact execute in the background, but with the -w flag
 they will wait to return until the operation has completed. Both are
@@ -86,7 +87,7 @@ the old files.
 
 To see the iterator settings for a table, use config.
 
-    username@instance filtertest> config -t filtertest -f iterator
+    username@instance examples.filtertest> config -t examples.filtertest -f 
iterator
     
---------+---------------------------------------------+---------------------------------------------------------------------------
     SCOPE    | NAME                                        | VALUE
     
---------+---------------------------------------------+---------------------------------------------------------------------------
@@ -103,7 +104,7 @@ To see the iterator settings for a table, use config.
     table    | table.iterator.scan.vers .................. | 
20,org.apache.accumulo.core.iterators.user.VersioningIterator
     table    | table.iterator.scan.vers.opt.maxVersions .. | 1
     
---------+---------------------------------------------+---------------------------------------------------------------------------
-    username@instance filtertest>
+    username@instance examples.filtertest>
 
 When setting new iterators, make sure to order their priority numbers
 (specified with -p) in the order you would like the iterators to be applied.
diff --git a/docs/helloworld.md b/docs/helloworld.md
index 5666b77..b0d4381 100644
--- a/docs/helloworld.md
+++ b/docs/helloworld.md
@@ -31,8 +31,8 @@ On the accumulo status page at the URL below (where 'master' 
is replaced with th
 
 To view the entries, use the shell (run `accumulo shell -u username -p 
password` to access it) to scan the table:
 
-    username@instance> table hellotable
-    username@instance hellotable> scan
+    username@instance> table examples.hellotable
+    username@instance examples.hellotable> scan
 
 You can also use a Java class to scan the table:
 
diff --git a/docs/isolation.md b/docs/isolation.md
index a848af9..1a37567 100644
--- a/docs/isolation.md
+++ b/docs/isolation.md
@@ -30,7 +30,9 @@ reading the row at the same time a mutation is changing the 
row.
 Below, Interference Test is run without isolation enabled for 5000 iterations
 and it reports problems.
 
-    $ ./bin/runex isolation.InterferenceTest -t isotest --iterations 5000
+
+    $ accumulo shell -u <username> -p <password> -e 'createnamespace examples' 
+    $ ./bin/runex isolation.InterferenceTest -t examples.isotest --iterations 
50000
     ERROR Columns in row 053 had multiple values [53, 4553]
     ERROR Columns in row 061 had multiple values [561, 61]
     ERROR Columns in row 070 had multiple values [570, 1070]
@@ -43,7 +45,7 @@ and it reports problems.
 Below, Interference Test is run with isolation enabled for 5000 iterations and
 it reports no problems.
 
-    $ ./bin/runex isolation.InterferenceTest -t isotest --iterations 5000 
--isolated
+    $ ./bin/runex isolation.InterferenceTest -t examples.isotest --iterations 
50000 --isolated
     finished
 
 
diff --git a/docs/regex.md b/docs/regex.md
index f9c4715..86c3b18 100644
--- a/docs/regex.md
+++ b/docs/regex.md
@@ -23,10 +23,11 @@ To run this example you will need some data in a table. The 
following will
 put a trivial amount of data into accumulo using the accumulo shell:
 
     $ accumulo shell
-    username@instance> createtable regex
-    username@instance> insert dogrow dogcf dogcq dogvalue
-    username@instance> insert catrow catcf catcq catvalue
-    username@instance> quit
+    username@instance> createnamespace examples
+    username@instance> createtable examples.regex
+    username@instance examples.regex> insert dogrow dogcf dogcq dogvalue
+    username@instance examples.regex> insert catrow catcf catcq catvalue
+    username@instance examples.regex> quit
 
 The RegexExample class sets an iterator on the scanner. This does pattern 
matching
 against each key/value in accumulo, and only returns matching items. It will 
do this
@@ -34,12 +35,11 @@ in parallel and will store the results in files in hdfs.
 
 The following will search for any rows in the input table that starts with 
"dog":
 
-    $ ./bin/runmr mapreduce.RegexExample -t regex --rowRegex 'dog.*' --output 
/tmp/output
+    $ ./bin/runmr mapreduce.RegexExample -t examples.regex --rowRegex 'dog.*' 
--output /tmp/output
 
     $ hdfs dfs -ls /tmp/output
     Found 3 items
     -rw-r--r--   1 username supergroup          0 2013-01-10 14:11 
/tmp/output/_SUCCESS
-    drwxr-xr-x   - username supergroup          0 2013-01-10 14:10 
/tmp/output/_logs
     -rw-r--r--   1 username supergroup         51 2013-01-10 14:10 
/tmp/output/part-m-00000
 
 We can see the output of our little map-reduce job:
diff --git a/docs/reservations.md b/docs/reservations.md
index a987cfe..0b682ba 100644
--- a/docs/reservations.md
+++ b/docs/reservations.md
@@ -31,9 +31,10 @@ for what, when and who.
     
---------------------+--------------------------------------+-------------------------------
         <instance name> | 9f8f2a97-432f-4e66-b153-861e2a1ca246 |               
 localhost:9999
     
-    $ /path/to/accumulo shell -u root -p secret -e "createtable ars"
+    $ /path/to/accumulo shell -u root -p secret -e "createnamespace examples"
+    $ /path/to/accumulo shell -u root -p secret -e "createtable examples.ars"
     $ ./bin/runex reservations.ARS
-    >connect <instance name> localhost root secret ars
+    >connect <instance name> localhost root secret examples.ars
       connected
     >
       Commands :
@@ -59,8 +60,8 @@ for what, when and who.
 Scanning the table in the Accumulo shell after running the example shows the
 following:
 
-    root@test16> table ars
-    root@test16 ars> scan
+    root@test16> table examples.ars
+    root@test16 examples.ars> scan
     room06:20140101 res:0001 []    mallory
     room06:20140101 res:0003 []    trent
     room06:20140101 res:0004 []    eve
diff --git a/docs/rgbalancer.md b/docs/rgbalancer.md
index daa1a3b..57d87eb 100644
--- a/docs/rgbalancer.md
+++ b/docs/rgbalancer.md
@@ -25,9 +25,10 @@ Below shows creating a table and adding splits.  For this 
example we would like
 all of the tablets where the split point has the same two digits to be on
 different tservers.  This gives us four groups of tablets: 01, 02, 03, and 04. 
  
 
-    root@accumulo> createtable testRGB
-    root@accumulo testRGB> addsplits -t testRGB 01b 01m 01r 01z  02b 02m 02r 
02z 03b 03m 03r 03z 04a 04b 04c 04d 04e 04f 04g 04h 04i 04j 04k 04l 04m 04n 04o 
04p
-    root@accumulo testRGB> tables -l
+    root@accumulo> createnamespace examples
+    root@accumulo> createtable examples.testRGB
+    root@accumulo examples.testRGB> addsplits -t examples.testRGB 01b 01m 01r 
01z 02b 02m 02r 02z 03b 03m 03r 03z 04a 04b 04c 04d 04e 04f 04g 04h 04i 04j 04k 
04l 04m 04n 04o 04p
+    root@accumulo examples.testRGB> tables -l
     accumulo.metadata    =>        !0
     accumulo.replication =>      +rep
     accumulo.root        =>        +r
@@ -36,7 +37,7 @@ different tservers.  This gives us four groups of tablets: 
01, 02, 03, and 04.
 
 After adding the splits we look at the locations in the metadata table.
 
-    root@accumulo testRGB> scan -t accumulo.metadata -b 2; -e 2< -c loc
+    root@accumulo examples.testRGB> scan -t accumulo.metadata -b 2; -e 2< -c 
loc
     2;01b loc:34a5f6e086b000c []    ip-10-1-2-25:9997
     2;01m loc:34a5f6e086b000c []    ip-10-1-2-25:9997
     2;01r loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
@@ -95,13 +96,13 @@ commands below.  The configured regular expression selects 
the first two digits
 from a tablet's end row as the group id.  Tablets that don't match and the
 default tablet are configured to be in group 04.
 
-    root@accumulo testRGB> config -t testRGB -s 
table.custom.balancer.group.regex.pattern=(\d\d).*
-    root@accumulo testRGB> config -t testRGB -s 
table.custom.balancer.group.regex.default=04
-    root@accumulo testRGB> config -t testRGB -s 
table.balancer=org.apache.accumulo.server.master.balancer.RegexGroupBalancer
+    root@accumulo examples.testRGB> config -t examples.testRGB -s 
table.custom.balancer.group.regex.pattern=(\d\d).*
+    root@accumulo examples.testRGB> config -t examples.testRGB -s 
table.custom.balancer.group.regex.default=04
+    root@accumulo examples.testRGB> config -t examples.testRGB -s 
table.balancer=org.apache.accumulo.server.master.balancer.RegexGroupBalancer
 
-After waiting a little bit, look at the tablet locations again and all is good.
+After waiting a bit, look at the tablet locations again and all is good.
 
-    root@accumulo testRGB> scan -t accumulo.metadata -b 2; -e 2< -c loc
+    root@accumulo examples.testRGB> scan -t accumulo.metadata -b 2; -e 2< -c 
loc
     2;01b loc:34a5f6e086b000a []    ip-10-1-2-18:9997
     2;01m loc:34a5f6e086b000c []    ip-10-1-2-25:9997
     2;01r loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
diff --git a/docs/rowhash.md b/docs/rowhash.md
index 5bea7df..303c52b 100644
--- a/docs/rowhash.md
+++ b/docs/rowhash.md
@@ -23,20 +23,21 @@ To run this example you will need some data in a table. The 
following will
 put a trivial amount of data into accumulo using the accumulo shell:
 
     $ accumulo shell
-    username@instance> createtable rowhash
-    username@instance> insert a-row cf cq value
-    username@instance> insert b-row cf cq value
-    username@instance> quit
+    username@instance> createnamespace examples
+    username@instance> createtable examples.rowhash
+    username@instance examples.rowhash> insert a-row cf cq value
+    username@instance examples.rowhash> insert b-row cf cq value
+    username@instance examples.rowhash> quit
 
 The RowHash class will insert a hash for each row in the database if it 
contains a
 specified column. Here's how you run the map/reduce job
 
-    $ ./bin/runmr mapreduce.RowHash -t rowhash --column cf:cq
+    $ ./bin/runmr mapreduce.RowHash -t examples.rowhash --column cf:cq
 
 Now we can scan the table and see the hashes:
 
     $ accumulo shell
-    username@instance> scan -t rowhash
+    username@instance> scan -t examples.rowhash
     a-row cf:cq []    value
     a-row cf-HASHTYPE:cq-MD5BASE64 []    IGPBYI1uC6+AJJxC4r5YBA==
     b-row cf:cq []    value
diff --git a/docs/sample.md b/docs/sample.md
index 931579c..e631f12 100644
--- a/docs/sample.md
+++ b/docs/sample.md
@@ -23,46 +23,47 @@ Accumulo supports building a set of sample data that can be 
efficiently
 accessed by scanners.  What data is included in the sample set is configurable.
 Below, some data representing documents are inserted.  
 
-    root@instance sampex> createtable sampex
-    root@instance sampex> insert 9255 doc content 'abcde'
-    root@instance sampex> insert 9255 doc url file://foo.txt
-    root@instance sampex> insert 8934 doc content 'accumulo scales'
-    root@instance sampex> insert 8934 doc url file://accumulo_notes.txt
-    root@instance sampex> insert 2317 doc content 'milk, eggs, bread, 
parmigiano-reggiano'
-    root@instance sampex> insert 2317 doc url file://groceries/9.txt
-    root@instance sampex> insert 3900 doc content 'EC2 ate my homework'
-    root@instance sampex> insert 3900 doc uril file://final_project.txt
-
-Below the table sampex is configured to build a sample set.  The configuration
+    root@instance> createnamespace examples
+    root@instance> createtable examples.sampex
+    root@instance examples.sampex> insert 9255 doc content 'abcde'
+    root@instance examples.sampex> insert 9255 doc url file://foo.txt
+    root@instance examples.sampex> insert 8934 doc content 'accumulo scales'
+    root@instance examples.sampex> insert 8934 doc url 
file://accumulo_notes.txt
+    root@instance examples.sampex> insert 2317 doc content 'milk, eggs, bread, 
parmigiano-reggiano'
+    root@instance examples.sampex> insert 2317 doc url file://groceries/9.txt
+    root@instance examples.sampex> insert 3900 doc content 'EC2 ate my 
homework'
+    root@instance examples.sampex> insert 3900 doc uril 
file://final_project.txt
+
+Below the table examples.sampex is configured to build a sample set.  The 
configuration
 causes Accumulo to include any row where `murmur3_32(row) % 3 ==0` in the
 table's sample data.
 
-    root@instance sampex> config -t sampex -s 
table.sampler.opt.hasher=murmur3_32
-    root@instance sampex> config -t sampex -s table.sampler.opt.modulus=3
-    root@instance sampex> config -t sampex -s 
table.sampler=org.apache.accumulo.core.client.sample.RowSampler
+    root@instance examples.sampex> config -t examples.sampex -s 
table.sampler.opt.hasher=murmur3_32
+    root@instance examples.sampex> config -t examples.sampex -s 
table.sampler.opt.modulus=3
+    root@instance examples.sampex> config -t examples.sampex -s 
table.sampler=org.apache.accumulo.core.client.sample.RowSampler
 
 Below, attempting to scan the sample returns an error.  This is because data
 was inserted before the sample set was configured.
 
-    root@instance sampex> scan --sample
+    root@instance examples.sampex> scan --sample
     2015-09-09 12:21:50,643 [shell.Shell] ERROR: 
org.apache.accumulo.core.client.SampleNotPresentException: Table sampex(ID:2) 
does not have sampling configured or built
 
 To remedy this problem, the following command will flush in memory data and
 compact any files that do not contain the correct sample data.   
 
-    root@instance sampex> compact -t sampex --sf-no-sample
+    root@instance examples.sampex> compact -t examples.sampex --sf-no-sample
 
 After the compaction, the sample scan works.  
 
-    root@instance sampex> scan --sample
+    root@instance examples.sampex> scan --sample
     2317 doc:content []    milk, eggs, bread, parmigiano-reggiano
     2317 doc:url []    file://groceries/9.txt
 
 The commands below show that updates to data in the sample are seen when
 scanning the sample.
 
-    root@instance sampex> insert 2317 doc content 'milk, eggs, bread, 
parmigiano-reggiano, butter'
-    root@instance sampex> scan --sample
+    root@instance examples.sampex> insert 2317 doc content 'milk, eggs, bread, 
parmigiano-reggiano, butter'
+    root@instance examples.sampex> scan --sample
     2317 doc:content []    milk, eggs, bread, parmigiano-reggiano, butter
     2317 doc:url []    file://groceries/9.txt
 
@@ -72,12 +73,12 @@ data written previously is partitioned using a different 
criteria.  Accumulo
 will detect this situation and fail sample scans.  The commands below show this
 failure and fixing the problem with a compaction.
 
-    root@instance sampex> config -t sampex -s table.sampler.opt.modulus=2
-    root@instance sampex> scan --sample
+    root@instance examples.sampex> config -t examples.sampex -s 
table.sampler.opt.modulus=2
+    root@instance examples.sampex> scan --sample
     2015-09-09 12:22:51,058 [shell.Shell] ERROR: 
org.apache.accumulo.core.client.SampleNotPresentException: Table sampex(ID:2) 
does not have sampling configured or built
-    root@instance sampex> compact -t sampex --sf-no-sample
+    root@instance examples.sampex> compact -t examples.sampex --sf-no-sample
     2015-09-09 12:23:07,242 [shell.Shell] INFO : Compaction of table sampex 
started for given range
-    root@instance sampex> scan --sample
+    root@instance examples.sampex> scan --sample
     2317 doc:content []    milk, eggs, bread, parmigiano-reggiano
     2317 doc:url []    file://groceries/9.txt
     3900 doc:content []    EC2 ate my homework
@@ -86,18 +87,18 @@ failure and fixing the problem with a compaction.
     9255 doc:url []    file://foo.txt
 
 The example above is replicated in a java program using the Accumulo API.
-Below is the program name and the command to run it.
+Below is the program name, and the command to run it.
 
     ./bin/runex sample.SampleExample
 
 The commands below look under the hood to give some insight into how this
 feature works.  The commands determine what files the sampex table is using.
 
-    root@instance sampex> tables -l
+    root@instance> tables -l
     accumulo.metadata    =>        !0
     accumulo.replication =>      +rep
     accumulo.root        =>        +r
-    sampex               =>         2
+    examples.sampex      =>         2
     trace                =>         1
     root@instance sampex> scan -t accumulo.metadata -c file -b 2 -e 2<
     2< 
file:hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf []    
702,8
@@ -148,31 +149,31 @@ configuration for sample scan to work.
 Shard Sampling Example
 ----------------------
 
-The [shard example][shard] shows how to index and search files using Accumulo. 
 That
+The Shard example shows how to index and search files using Accumulo.  That
 example indexes documents into a table named `shard`.  The indexing scheme used
 in that example places the document name in the column qualifier.  A useful
 sample of this indexing scheme should contain all data for any document in the
 sample.   To accomplish this, the following commands build a sample for the
 shard table based on the column qualifier.
 
-    root@instance shard> config -t shard -s table.sampler.opt.hasher=murmur3_32
-    root@instance shard> config -t shard -s table.sampler.opt.modulus=101
-    root@instance shard> config -t shard -s table.sampler.opt.qualifier=true
-    root@instance shard> config -t shard -s 
table.sampler=org.apache.accumulo.core.client.sample.RowColumnSampler
-    root@instance shard> compact -t shard --sf-no-sample -w
+    root@instance examples.shard> config -t examples.shard -s 
table.sampler.opt.hasher=murmur3_32
+    root@instance examples.shard> config -t examples.shard -s 
table.sampler.opt.modulus=101
+    root@instance examples.shard> config -t examples.shard -s 
table.sampler.opt.qualifier=true
+    root@instance examples.shard> config -t examples.shard -s 
table.sampler=org.apache.accumulo.core.client.sample.RowColumnSampler
+    root@instance examples.shard> compact -t examples.shard --sf-no-sample -w
     2015-07-23 15:00:09,280 [shell.Shell] INFO : Compacting table ...
     2015-07-23 15:00:10,134 [shell.Shell] INFO : Compaction of table shard 
completed for given range
 
 After enabling sampling, the command below counts the number of documents in
 the sample containing the words `import` and `int`.     
 
-    $ ./bin/runex shard.Query --sample -t shard import int | fgrep '.java' | wc
+    $ ./bin/runex shard.Query --sample -t examples.shard import int | fgrep 
'.java' | wc
          11      11    1246
 
 The command below counts the total number of documents containing the words
 `import` and `int`.
 
-    $ ./bin/runex shard.Query -t shard import int | fgrep '.java' | wc
+    $ ./bin/runex shard.Query -t examples.shard import int | fgrep '.java' | wc
        1085    1085  118175
 
 The counts 11 out of 1085 total are around what would be expected for a modulus
@@ -188,4 +189,4 @@ To experiment with this iterator, use the following 
command.  The
 `--sampleCutoff` option below will cause the query to return nothing if, based
 on the sample, it appears the query would return more than 1000 documents.
 With the sample built above (modulus 101), the estimate is roughly
 11 * 101 = 1111 matching documents, over the cutoff, so nothing is returned.
 
-    $ ./bin/runex shard.Query --sampleCutoff 1000 -t shard import int | fgrep 
'.java' | wc
+    $ ./bin/runex shard.Query --sampleCutoff 1000 -t examples.shard import int 
| fgrep '.java' | wc
diff --git a/docs/shard.md b/docs/shard.md
index dbae395..f6f6848 100644
--- a/docs/shard.md
+++ b/docs/shard.md
@@ -26,16 +26,17 @@ document, or "sharded". This example shows how to use the 
intersecting iterator
 
 To run these example programs, create two tables like below.
 
-    username@instance> createtable shard
-    username@instance shard> createtable doc2term
+    username@instance> createnamespace examples
+    username@instance> createtable examples.shard
+    username@instance examples.shard> createtable examples.doc2term
 
 After creating the tables, index some files. The following command indexes all 
of the java files in the Accumulo source code.
 
-    $ find /path/to/accumulo/core -name "*.java" | xargs ./bin/runex 
shard.Index -t shard --partitions 30
+    $ find /path/to/accumulo/core -name "*.java" | xargs ./bin/runex 
shard.Index -t examples.shard --partitions 30
 
 The following command queries the index to find all files containing 'foo' and 
'bar'.
 
-    $ ./bin/runex shard.Query -t shard foo bar
+    $ ./bin/runex shard.Query -t examples.shard foo bar
     
/local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/ColumnVisibilityTest.java
     
/local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/client/mock/MockConnectorTest.java
     
/local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/VisibilityEvaluatorTest.java
@@ -44,12 +45,12 @@ The following command queries the index to find all files 
containing 'foo' and '
 
 In order to run ContinuousQuery, we need to run Reverse.java to populate 
doc2term.
 
-    $ ./bin/runex shard.Reverse --shardTable shard --doc2Term doc2term
+    $ ./bin/runex shard.Reverse --shardTable examples.shard --doc2Term 
examples.doc2term
 
 Below, ContinuousQuery is run using 5 terms. It selects 5 random terms from each document, then repeatedly
 picks one random set of 5 terms and queries with it, printing the number of matching documents and the time in seconds.
 
-    $ ./bin/runex shard.ContinuousQuery --shardTable shard --doc2Term doc2term 
--terms 5
+    $ ./bin/runex shard.ContinuousQuery --shardTable examples.shard --doc2Term 
examples.doc2term --terms 5
     [public, core, class, binarycomparable, b] 2  0.081
     [wordtodelete, unindexdocument, doctablename, putdelete, insert] 1  0.041
     [import, columnvisibilityinterpreterfactory, illegalstateexception, cv, 
columnvisibility] 1  0.049
diff --git a/docs/tabletofile.md b/docs/tabletofile.md
index 36078e6..cb9f248 100644
--- a/docs/tabletofile.md
+++ b/docs/tabletofile.md
@@ -22,26 +22,24 @@ To run this example you will need some data in a table. The 
following will
 put a trivial amount of data into accumulo using the accumulo shell:
 
     $ accumulo shell
-    username@instance> createtable input
-    username@instance> insert dog cf cq dogvalue
-    username@instance> insert cat cf cq catvalue
-    username@instance> insert junk family qualifier junkvalue
-    username@instance> quit
+    root@instance> createnamespace examples
+    root@instance> createtable examples.input
+    root@instance examples.input> insert dog cf cq dogvalue
+    root@instance examples.input> insert cat cf cq catvalue
+    root@instance examples.input> insert junk family qualifier junkvalue
+    root@instance examples.input> quit
 
 The TableToFile class configures a map-only job to read the specified columns 
and
 write the key/value pairs to a file in HDFS.
 
 The following will extract the rows containing the column "cf:cq":
 
-    $ ./bin/runmr mapreduce.TableToFile -t input --columns cf:cq --output 
/tmp/output
+    $ ./bin/runmr mapreduce.TableToFile -t examples.input --columns cf:cq --output /tmp/output
 
     $ hadoop fs -ls /tmp/output
-    -rw-r--r--   1 username supergroup          0 2013-01-10 14:44 
/tmp/output/_SUCCESS
-    drwxr-xr-x   - username supergroup          0 2013-01-10 14:44 
/tmp/output/_logs
-    drwxr-xr-x   - username supergroup          0 2013-01-10 14:44 
/tmp/output/_logs/history
-    -rw-r--r--   1 username supergroup       9049 2013-01-10 14:44 
/tmp/output/_logs/history/job_201301081658_0011_1357847072863_username_TableToFile%5F1357847071434
-    -rw-r--r--   1 username supergroup      26172 2013-01-10 14:44 
/tmp/output/_logs/history/job_201301081658_0011_conf.xml
-    -rw-r--r--   1 username supergroup         50 2013-01-10 14:44 
/tmp/output/part-m-00000
+    Found 2 items
+    -rw-r--r--   3 root supergroup          0 2021-05-04 10:32 
/tmp/output/_SUCCESS
+    -rw-r--r--   3 root supergroup         44 2021-05-04 10:32 
/tmp/output/part-m-00000
 
 We can see the output of our little map-reduce job:
 
diff --git a/docs/terasort.md b/docs/terasort.md
index 7e39a9e..16f2ea1 100644
--- a/docs/terasort.md
+++ b/docs/terasort.md
@@ -20,15 +20,20 @@ This example uses map/reduce to generate random input data 
that will
 be sorted by storing it into accumulo. It uses data very similar to the
 hadoop terasort benchmark.
 
+First, make sure the 'examples' namespace exists. If it already exists, the 
error message can be 
+ignored.
+
+    $ accumulo shell -u root -p secret -e 'createnamespace examples'   
+
 To run this example you run it with arguments describing the amount of data:
 
     $ ./bin/runmr mapreduce.TeraSortIngest --count 10 --minKeySize 10 
--maxKeySize 10 \
-    --minValueSize 78 --maxValueSize 78 --table sort --splits 10 \
+        --minValueSize 78 --maxValueSize 78 --table examples.sort --splits 10
 
 After the map reduce job completes, scan the data:
 
     $ accumulo shell
-    username@instance> scan -t sort
+    username@instance> scan -t examples.sort
     +l-$$OE/ZH c:         4 []    
GGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOO
     ,C)wDw//u= c:        10 []    
CCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKK
     75@~?'WdUF c:         1 []    
IIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQ
diff --git a/docs/visibility.md b/docs/visibility.md
index daca822..b79f91c 100644
--- a/docs/visibility.md
+++ b/docs/visibility.md
@@ -18,12 +18,14 @@ limitations under the License.
 
 ## Creating a new user
 
+    root@instance> createnamespace examples
     root@instance> createuser username
     Enter new password for 'username': ********
     Please confirm new password for 'username': ********
+
     root@instance> user username
     Enter password for user username: ********
-    username@instance> createtable vistest
+    username@instance> createtable examples.vistest
     06 10:48:47,931 [shell.Shell] ERROR: 
org.apache.accumulo.core.client.AccumuloSecurityException: Error 
PERMISSION_DENIED - User does not have permission to perform this action
     username@instance> userpermissions
     System permissions:
@@ -44,8 +46,8 @@ A user does not by default have permission to create a table.
     root@instance> grant -s System.CREATE_TABLE -u username
     root@instance> user username
     Enter password for user username: ********
-    username@instance> createtable vistest
-    username@instance> userpermissions
+    username@instance> createtable examples.vistest
+    username@instance examples.vistest> userpermissions
     System permissions: System.CREATE_TABLE
 
     Namespace permissions (accumulo): Namespace.READ
@@ -53,8 +55,7 @@ A user does not by default have permission to create a table.
     Table permissions (accumulo.metadata): Table.READ
     Table permissions (accumulo.replication): Table.READ
     Table permissions (accumulo.root): Table.READ
-    Table permissions (vistest): Table.READ, Table.WRITE, Table.BULK_IMPORT, 
Table.ALTER_TABLE, Table.GRANT, Table.DROP_TABLE, Table.GET_SUMMARIES
-    username@instance vistest>
+    Table permissions (examples.vistest): Table.READ, Table.WRITE, 
Table.BULK_IMPORT, Table.ALTER_TABLE, Table.GRANT, Table.DROP_TABLE, 
Table.GET_SUMMARIES
 
 ## Inserting data with visibilities
 
@@ -63,14 +64,14 @@ tokens. Authorization tokens are arbitrary strings taken 
from a restricted
 ASCII character set. Parentheses are required to specify order of operations
 in visibilities.
 
-    username@instance vistest> insert row f1 q1 v1 -l A
-    username@instance vistest> insert row f2 q2 v2 -l A&B
-    username@instance vistest> insert row f3 q3 v3 -l 
apple&carrot|broccoli|spinach
+    username@instance examples.vistest> insert row f1 q1 v1 -l A
+    username@instance examples.vistest> insert row f2 q2 v2 -l A&B
+    username@instance examples.vistest> insert row f3 q3 v3 -l 
apple&carrot|broccoli|spinach
     06 11:19:01,432 [shell.Shell] ERROR: 
org.apache.accumulo.core.util.BadArgumentException: cannot mix | and & near 
index 12
     apple&carrot|broccoli|spinach
                 ^
-    username@instance vistest> insert row f3 q3 v3 -l 
(apple&carrot)|broccoli|spinach
-    username@instance vistest>
+    username@instance examples.vistest> insert row f3 q3 v3 -l 
(apple&carrot)|broccoli|spinach
+    username@instance examples.vistest>
 
 ## Scanning with authorizations
 
@@ -79,54 +80,54 @@ authorizations and each Accumulo scan has authorizations. 
Scan authorizations
 are only allowed to be a subset of the user's authorizations. By default, a
 user's authorizations set is empty.
 
-    username@instance vistest> scan
-    username@instance vistest> scan -s A
+    username@instance examples.vistest> scan
+    username@instance examples.vistest> scan -s A
     06 11:43:14,951 [shell.Shell] ERROR: java.lang.RuntimeException: 
org.apache.accumulo.core.client.AccumuloSecurityException: Error 
BAD_AUTHORIZATIONS - The user does not have the specified authorizations 
assigned
-    username@instance vistest>
+    username@instance examples.vistest>
 
 ## Setting authorizations for a user
 
-    username@instance vistest> setauths -s A
+    username@instance examples.vistest> setauths -s A
     06 11:53:42,056 [shell.Shell] ERROR: 
org.apache.accumulo.core.client.AccumuloSecurityException: Error 
PERMISSION_DENIED - User does not have permission to perform this action
-    username@instance vistest>
+    username@instance examples.vistest>
 
 A user cannot set authorizations unless the user has the System.ALTER_USER 
permission.
 The root user has this permission.
 
-    username@instance vistest> user root
+    username@instance examples.vistest> user root
     Enter password for user root: ********
-    root@instance vistest> setauths -s A -u username
-    root@instance vistest> user username
+    root@instance examples.vistest> setauths -s A -u username
+    root@instance examples.vistest> user username
     Enter password for user username: ********
-    username@instance vistest> scan -s A
+    username@instance examples.vistest> scan -s A
     row f1:q1 [A]    v1
-    username@instance vistest> scan
+    username@instance examples.vistest> scan
     row f1:q1 [A]    v1
-    username@instance vistest>
+    username@instance examples.vistest>
 
 The default authorizations for a scan are the user's entire set of 
authorizations.
 
-    username@instance vistest> user root
+    username@instance examples.vistest> user root
     Enter password for user root: ********
-    root@instance vistest> setauths -s A,B,broccoli -u username
-    root@instance vistest> user username
+    root@instance examples.vistest> setauths -s A,B,broccoli -u username
+    root@instance examples.vistest> user username
     Enter password for user username: ********
-    username@instance vistest> getauths
+    username@instance examples.vistest> getauths
     A,B,broccoli
-    username@instance vistest> getauths -u username
+    username@instance examples.vistest> getauths -u username
     A,B,broccoli
 
-    username@instance vistest> scan
+    username@instance examples.vistest> scan
     row f1:q1 [A]    v1
     row f2:q2 [A&B]    v2
     row f3:q3 [(apple&carrot)|broccoli|spinach]    v3
-    username@instance vistest> scan -s B
-    username@instance vistest>
+    username@instance examples.vistest> scan -s B
+    username@instance examples.vistest>
 
 If you want, you can limit a user to only be able to insert data which they 
can read themselves.
 First, check for any existing constraints.
 
-    username@instance vistest> constraint -l -t vistest
+    username@instance examples.vistest> constraint -l -t examples.vistest
     org.apache.accumulo.core.constraints.DefaultKeySizeConstraint=1
 
 If existing constraints exist, take note of the values assigned to the constraints and use a
@@ -137,18 +138,18 @@ available value. In this case '2'.
 
 The constraint can be set with the following command:
 
-    username@instance vistest> config -t vistest -s 
table.constraint.2=org.apache.accumulo.core.constraints.VisibilityConstraint
-    username@instance vistest> constraint -l
-    org.apache.accumulo.core.constraints.DefaultKeySizeConstraint=1
-    org.apache.accumulo.core.constraints.VisibilityConstraint=2
-    username@instance vistest> insert row f4 q4 v4 -l spinach
+    username@instance examples.vistest> config -t examples.vistest -s 
table.constraint.2=org.apache.accumulo.core.data.constraints.VisibilityConstraint
+    username@instance examples.vistest> constraint -l
+    org.apache.accumulo.core.data.constraints.DefaultKeySizeConstraint=1
+    org.apache.accumulo.core.data.constraints.VisibilityConstraint=2
+    username@instance examples.vistest> insert row f4 q4 v4 -l spinach
          Constraint Failures:
             
ConstraintViolationSummary(constrainClass:org.apache.accumulo.core.constraints.VisibilityConstraint,
 violationCode:2, violationDescription:User does not have authorization on 
column visibility, numberOfViolatingMutations:1)
-    username@instance vistest> insert row f4 q4 v4 -l spinach|broccoli
-    username@instance vistest> scan
+    username@instance examples.vistest> insert row f4 q4 v4 -l spinach|broccoli
+    username@instance examples.vistest> scan
     row f1:q1 [A]    v1
     row f2:q2 [A&B]    v2
     row f3:q3 [(apple&carrot)|broccoli|spinach]    v3
     row f4:q4 [spinach|broccoli]    v4
-    username@instance vistest>
+    username@instance examples.vistest>
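
The same constraint setup can be done in Java for readers scripting this rather
than using the shell. A minimal sketch, assuming an existing `AccumuloClient`
named `client` (the variable name is illustrative, not part of this commit):

    // Add the visibility constraint to examples.vistest via the API.
    // addConstraint returns the assigned constraint number.
    int num = client.tableOperations().addConstraint("examples.vistest",
        "org.apache.accumulo.core.data.constraints.VisibilityConstraint");
    // To undo it later:
    // client.tableOperations().removeConstraint("examples.vistest", num);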
 
diff --git a/docs/wordcount.md b/docs/wordcount.md
index 3e3cc52..8da6599 100644
--- a/docs/wordcount.md
+++ b/docs/wordcount.md
@@ -41,8 +41,8 @@ writes word counts to Accumulo table.
 After the MapReduce job completes, query the Accumulo table to see word counts.
 
     $ accumulo shell
-    username@instance> table wordCount
-    username@instance wordCount> scan -b the
+    username@instance> table examples.wordcount
+    username@instance examples.wordcount> scan -b the
     the count:20080906 []    75
     their count:20080906 []    2
     them count:20080906 []    1
@@ -56,14 +56,14 @@ in HDFS and run the job with the `-D` options.  This will 
configure the MapReduc
 to obtain the client properties from HDFS:
 
     $ hdfs dfs -copyFromLocal ./conf/accumulo-client.properties /user/myuser/
-    $ ./bin/runmr mapreduce.WordCount -i /wc -t wordCount2 -d 
/user/myuser/accumulo-client.properties
+    $ ./bin/runmr mapreduce.WordCount -i /wc -t examples.wordcount2 -d 
/user/myuser/accumulo-client.properties
 
-After the MapReduce job completes, query the `wordCount2` table. The results 
should
+After the MapReduce job completes, query the `wordcount2` table. The results 
should
 be the same as before:
 
     $ accumulo shell
-    username@instance> table wordCount2
-    username@instance wordCount2> scan -b the
+    username@instance> table examples.wordcount2
+    username@instance examples.wordcount2> scan -b the
     the count:20080906 []    75
     their count:20080906 []    2
     ...
diff --git a/spark/src/main/java/org/apache/accumulo/spark/CopyPlus5K.java 
b/spark/src/main/java/org/apache/accumulo/spark/CopyPlus5K.java
index 416d360..cd58889 100644
--- a/spark/src/main/java/org/apache/accumulo/spark/CopyPlus5K.java
+++ b/spark/src/main/java/org/apache/accumulo/spark/CopyPlus5K.java
@@ -41,6 +41,8 @@ import org.apache.spark.api.java.JavaSparkContext;
 
 public class CopyPlus5K {
 
+  private static final Logger log = LoggerFactory.getLogger(CopyPlus5K.class);
+
   public static class AccumuloRangePartitioner extends Partitioner {
 
     private static final long serialVersionUID = 1L;
@@ -76,8 +78,20 @@ public class CopyPlus5K {
         client.tableOperations().delete(outputTable);
       }
       // Create tables
-      client.tableOperations().create(inputTable);
-      client.tableOperations().create(outputTable);
+      try {
+        client.tableOperations().create(inputTable);
+      } catch (TableExistsException e) {
+        log.error("Something went wrong. Table '{}' should have been deleted 
prior to creation "
+            + "attempt!", inputTable);
+        return;
+      }
+      try {
+        client.tableOperations().create(outputTable);
+      } catch (TableExistsException e) {
+        log.error("Something went wrong. Table '{}' should have been deleted 
prior to creation "
+            + "attempt!", inputTable);
+        return;
+      }
 
       // Write data to input table
       try (BatchWriter bw = client.createBatchWriter(inputTable)) {
diff --git a/src/main/java/org/apache/accumulo/examples/Common.java 
b/src/main/java/org/apache/accumulo/examples/Common.java
new file mode 100644
index 0000000..69d1043
--- /dev/null
+++ b/src/main/java/org/apache/accumulo/examples/Common.java
@@ -0,0 +1,51 @@
+package org.apache.accumulo.examples;
+
+import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.NamespaceExistsException;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class Common {
+
+  private static final Logger log = LoggerFactory.getLogger(Common.class);
+
+  public static final String NAMESPACE = "examples";
+
+  public static final String TABLE_EXISTS_MSG = "Table already exists. User 
may wish to delete "
+      + "existing table and re-run example. Table name: ";
+  public static final String NAMESPACE_EXISTS_MSG = "Namespace already exists. 
User can ignore "
+      + "this message and continue. Namespace: ";
+
+  /**
+   * Create a table within the supplied namespace.
+   *
+   * The incoming table name is expected to have the form 
"namespace.tablename". If the namespace
+   * portion of the name is blank then the table is created outside of a 
namespace.
+   *
+   * @param client
+   *          AccumuloClient instance
+   * @param table
+   *          The name of the table to be created
+   * @throws AccumuloException
+   *           if a general error occurs while creating the namespace or table
+   * @throws AccumuloSecurityException
+   *           if the user lacks permission to create the namespace or table
+   */
+  public static void createTableWithNamespace(final AccumuloClient client, 
final String table)
+      throws AccumuloException, AccumuloSecurityException {
+    String[] name = table.split("\\.");
+    if (name.length == 2 && !name[0].isEmpty()) {
+      try {
+        client.namespaceOperations().create(name[0]);
+      } catch (NamespaceExistsException e) {
+        log.info(NAMESPACE_EXISTS_MSG + name[0]);
+      }
+    }
+    try {
+      client.tableOperations().create(table);
+    } catch (TableExistsException e) {
+      log.warn(TABLE_EXISTS_MSG + table);
+    }
+  }
+}
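
A minimal usage sketch of the new helper, assuming a client built the same way
as in the examples (`propsPath` is illustrative, and the checked exceptions are
left to the caller):

    // Creates the 'examples' namespace if needed, then examples.mytable.
    // Throws AccumuloException/AccumuloSecurityException on failure.
    try (AccumuloClient client = Accumulo.newClient().from(propsPath).build()) {
      Common.createTableWithNamespace(client, Common.NAMESPACE + ".mytable");
    }

If the namespace already exists the helper logs an info message and continues;
if the table already exists it logs a warning, leaving the decision to delete
and re-run to the user.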
diff --git 
a/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java 
b/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java
index 2391dda..80e9df2 100644
--- a/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java
+++ b/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java
@@ -32,19 +32,25 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.examples.cli.ClientOpts;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Simple example for reading random batches of data from Accumulo.
  */
-public class BloomBatchScanner {
+public final class BloomBatchScanner {
+
+  private static final Logger log = 
LoggerFactory.getLogger(BloomBatchScanner.class);
+
+  private BloomBatchScanner() {}
 
   public static void main(String[] args) throws TableNotFoundException {
     ClientOpts opts = new ClientOpts();
     opts.parseArgs(BloomBatchScanner.class.getName(), args);
 
     try (AccumuloClient client = 
Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
-      scan(client, "bloom_test1", 7);
-      scan(client, "bloom_test2", 7);
+      scan(client, BloomCommon.BLOOM_TEST1_TABLE, 7);
+      scan(client, BloomCommon.BLOOM_TEST2_TABLE, 7);
     }
   }
 
@@ -64,22 +70,21 @@ public class BloomBatchScanner {
     long results = 0;
     long lookups = ranges.size();
 
-    System.out.println("Scanning " + tableName + " with seed " + seed);
+    log.info("Scanning {} with seed {}", tableName, seed);
     try (BatchScanner scan = client.createBatchScanner(tableName, 
Authorizations.EMPTY, 20)) {
       scan.setRanges(ranges);
       for (Entry<Key,Value> entry : scan) {
         Key key = entry.getKey();
-        if (!expectedRows.containsKey(key.getRow().toString())) {
-          System.out.println("Encountered unexpected key: " + key);
-        } else {
+        if (expectedRows.containsKey(key.getRow().toString())) {
           expectedRows.put(key.getRow().toString(), true);
+        } else {
+          log.info("Encountered unexpected key: {}", key);
         }
         results++;
       }
     }
-
     long t2 = System.currentTimeMillis();
-    System.out.println(String.format("Scan finished! %6.2f lookups/sec, %.2f 
secs, %d results",
+    log.info(String.format("Scan finished! %6.2f lookups/sec, %.2f secs, %d 
results",
         lookups / ((t2 - t1) / 1000.0), ((t2 - t1) / 1000.0), results));
 
     int count = 0;
@@ -89,8 +94,8 @@ public class BloomBatchScanner {
       }
     }
     if (count > 0)
-      System.out.println("Did not find " + count);
+      log.info("Did not find " + count);
     else
-      System.out.println("All expected rows were scanned");
+      log.info("All expected rows were scanned");
   }
 }
diff --git a/src/main/java/org/apache/accumulo/examples/bloom/BloomCommon.java 
b/src/main/java/org/apache/accumulo/examples/bloom/BloomCommon.java
new file mode 100644
index 0000000..ed72f3a
--- /dev/null
+++ b/src/main/java/org/apache/accumulo/examples/bloom/BloomCommon.java
@@ -0,0 +1,13 @@
+package org.apache.accumulo.examples.bloom;
+
+import org.apache.accumulo.examples.Common;
+
+enum BloomCommon {
+  ;
+  public static final String BLOOM_TEST1_TABLE = Common.NAMESPACE + 
".bloom_test1";
+  public static final String BLOOM_TEST2_TABLE = Common.NAMESPACE + 
".bloom_test2";
+  public static final String BLOOM_TEST3_TABLE = Common.NAMESPACE + 
".bloom_test3";
+  public static final String BLOOM_TEST4_TABLE = Common.NAMESPACE + 
".bloom_test4";
+
+  public static final String BLOOM_ENABLED_PROPERTY = "table.bloom.enabled";
+}
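
The zero-constant enum above is a deliberate idiom: an enum with no constants
cannot be instantiated or subclassed, so it serves purely as a holder for
shared constants. A final class with a private constructor, as used elsewhere
in this commit, is the equivalent alternative (comparison sketch only, not part
of the commit):

    // Equivalent constants holder using a final class instead of an enum.
    final class BloomCommonAlt {
      static final String BLOOM_TEST1_TABLE = Common.NAMESPACE + ".bloom_test1";
      private BloomCommonAlt() {}
    }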
diff --git a/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java 
b/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java
index 6bba9ea..d2988f5 100644
--- a/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java
+++ b/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java
@@ -24,14 +24,20 @@ import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOpts;
 import org.apache.accumulo.examples.client.RandomBatchWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-public class BloomFilters {
+public final class BloomFilters {
+
+  private static final Logger log = 
LoggerFactory.getLogger(BloomFilters.class);
+
+  private BloomFilters() {}
 
   public static void main(String[] args)
       throws AccumuloException, AccumuloSecurityException, 
TableNotFoundException {
@@ -40,34 +46,32 @@ public class BloomFilters {
     opts.parseArgs(BloomFilters.class.getName(), args);
 
     try (AccumuloClient client = 
Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
-      try {
-        System.out.println("Creating bloom_test1 and bloom_test2");
-        client.tableOperations().create("bloom_test1");
-        client.tableOperations().setProperty("bloom_test1", 
"table.compaction.major.ratio", "7");
-        client.tableOperations().create("bloom_test2");
-        client.tableOperations().setProperty("bloom_test2", 
"table.bloom.enabled", "true");
-        client.tableOperations().setProperty("bloom_test2", 
"table.compaction.major.ratio", "7");
-      } catch (TableExistsException e) {
-        // ignore
-      }
+      createTableAndSetCompactionRatio(client, BloomCommon.BLOOM_TEST1_TABLE);
+      createTableAndSetCompactionRatio(client, BloomCommon.BLOOM_TEST2_TABLE);
+      client.tableOperations().setProperty(BloomCommon.BLOOM_TEST2_TABLE,
+          BloomCommon.BLOOM_ENABLED_PROPERTY, "true");
+      writeAndFlushData(BloomCommon.BLOOM_TEST1_TABLE, client);
+      writeAndFlushData(BloomCommon.BLOOM_TEST2_TABLE, client);
+    }
+  }
 
-      // Write a million rows 3 times flushing files to disk separately
-      System.out.println("Writing data to bloom_test1");
-      writeData(client, "bloom_test1", 7);
-      client.tableOperations().flush("bloom_test1", null, null, true);
-      writeData(client, "bloom_test1", 8);
-      client.tableOperations().flush("bloom_test1", null, null, true);
-      writeData(client, "bloom_test1", 9);
-      client.tableOperations().flush("bloom_test1", null, null, true);
+  private static void createTableAndSetCompactionRatio(AccumuloClient client,
+      final String tableName) throws AccumuloException, 
AccumuloSecurityException {
+    log.info("Creating {}", tableName);
+    Common.createTableWithNamespace(client, tableName);
+    client.tableOperations().setProperty(tableName, 
"table.compaction.major.ratio", "7");
+  }
 
-      System.out.println("Writing data to bloom_test2");
-      writeData(client, "bloom_test2", 7);
-      client.tableOperations().flush("bloom_test2", null, null, true);
-      writeData(client, "bloom_test2", 8);
-      client.tableOperations().flush("bloom_test2", null, null, true);
-      writeData(client, "bloom_test2", 9);
-      client.tableOperations().flush("bloom_test2", null, null, true);
-    }
+  // Write a million rows 3 times flushing files to disk separately
+  private static void writeAndFlushData(final String tableName, final 
AccumuloClient client)
+      throws TableNotFoundException, AccumuloSecurityException, 
AccumuloException {
+    log.info("Writing data to {}", tableName);
+    writeData(client, tableName, 7);
+    client.tableOperations().flush(tableName, null, null, true);
+    writeData(client, tableName, 8);
+    client.tableOperations().flush(tableName, null, null, true);
+    writeData(client, tableName, 9);
+    client.tableOperations().flush(tableName, null, null, true);
   }
 
   // write a million random rows
diff --git 
a/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java 
b/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java
index 668ff3e..e050102 100644
--- a/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java
+++ b/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java
@@ -22,33 +22,39 @@ import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOpts;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class BloomFiltersNotFound {
 
+  private static final Logger log = 
LoggerFactory.getLogger(BloomFiltersNotFound.class);
+
   public static void main(String[] args)
       throws AccumuloException, AccumuloSecurityException, 
TableNotFoundException {
     ClientOpts opts = new ClientOpts();
     opts.parseArgs(BloomFiltersNotFound.class.getName(), args);
 
     try (AccumuloClient client = 
Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
-      try {
-        client.tableOperations().create("bloom_test3");
-        client.tableOperations().create("bloom_test4");
-        client.tableOperations().setProperty("bloom_test4", 
"table.bloom.enabled", "true");
-      } catch (TableExistsException e) {
-        // ignore
-      }
-      System.out.println("Writing data to bloom_test3 and bloom_test4 (bloom 
filters enabled)");
-      writeData(client, "bloom_test3", 7);
-      client.tableOperations().flush("bloom_test3", null, null, true);
-      writeData(client, "bloom_test4", 7);
-      client.tableOperations().flush("bloom_test4", null, null, true);
-
-      BloomBatchScanner.scan(client, "bloom_test3", 8);
-      BloomBatchScanner.scan(client, "bloom_test4", 8);
+      Common.createTableWithNamespace(client, BloomCommon.BLOOM_TEST3_TABLE);
+      Common.createTableWithNamespace(client, BloomCommon.BLOOM_TEST4_TABLE);
+      client.tableOperations().setProperty(BloomCommon.BLOOM_TEST4_TABLE,
+          BloomCommon.BLOOM_ENABLED_PROPERTY, "true");
+
+      writeAndFlush(BloomCommon.BLOOM_TEST3_TABLE, client);
+      writeAndFlush(BloomCommon.BLOOM_TEST4_TABLE, client);
+
+      BloomBatchScanner.scan(client, BloomCommon.BLOOM_TEST3_TABLE, 8);
+      BloomBatchScanner.scan(client, BloomCommon.BLOOM_TEST4_TABLE, 8);
     }
   }
+
+  private static void writeAndFlush(String tableName, AccumuloClient client)
+      throws TableNotFoundException, AccumuloException, 
AccumuloSecurityException {
+    log.info("Writing data to {} (bloom filters enabled)", tableName);
+    writeData(client, tableName, 7);
+    client.tableOperations().flush(tableName, null, null, true);
+  }
 }
diff --git 
a/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java 
b/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
index df955ae..0bd46f0 100644
--- a/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
+++ b/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
@@ -27,10 +27,7 @@ import java.util.Random;
 
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
@@ -43,22 +40,24 @@ import org.slf4j.LoggerFactory;
 /**
  * Simple example for reading random batches of data from Accumulo.
  */
-public class RandomBatchScanner {
+public final class RandomBatchScanner {
 
   private static final Logger log = 
LoggerFactory.getLogger(RandomBatchScanner.class);
 
-  public static void main(String[] args)
-      throws AccumuloException, AccumuloSecurityException, 
TableNotFoundException {
+  private RandomBatchScanner() {}
+
+  public static void main(String[] args) throws TableNotFoundException {
 
     ClientOpts opts = new ClientOpts();
     opts.parseArgs(RandomBatchScanner.class.getName(), args);
 
     try (AccumuloClient client = 
Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
 
-      try {
-        client.tableOperations().create("batch");
-      } catch (TableExistsException e) {
-        // ignore
+      if (!client.tableOperations().exists(SequentialBatchWriter.BATCH_TABLE)) 
{
+        log.error("Table {} does not exist. Nothing to scan!",
+            SequentialBatchWriter.BATCH_TABLE);
+        log.error("Try running './bin/runex client.SequentialBatchWriter' first");
+        return;
       }
 
       int totalLookups = 1000;
@@ -78,7 +77,8 @@ public class RandomBatchScanner {
       long lookups = 0;
 
       log.info("Reading ranges using BatchScanner");
-      try (BatchScanner scan = client.createBatchScanner("batch", 
Authorizations.EMPTY, 20)) {
+      try (BatchScanner scan = 
client.createBatchScanner(SequentialBatchWriter.BATCH_TABLE,
+          Authorizations.EMPTY, 20)) {
         scan.setRanges(ranges);
         for (Entry<Key,Value> entry : scan) {
           Key key = entry.getKey();
@@ -93,10 +93,10 @@ public class RandomBatchScanner {
                 new String(expectedValue.get(), UTF_8), new 
String(value.get(), UTF_8));
           }
 
-          if (!expectedRows.containsKey(key.getRow().toString())) {
-            log.error("Encountered unexpected key: {} ", key);
-          } else {
+          if (expectedRows.containsKey(key.getRow().toString())) {
             expectedRows.put(key.getRow().toString(), true);
+          } else {
+            log.error("Encountered unexpected key: {} ", key);
           }
 
           lookups++;
@@ -105,7 +105,6 @@ public class RandomBatchScanner {
           }
         }
       }
-
       long t2 = System.currentTimeMillis();
       log.info(String.format("Scan finished! %6.2f lookups/sec, %.2f secs, %d 
results",
           lookups / ((t2 - t1) / 1000.0), ((t2 - t1) / 1000.0), lookups));
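
Since RandomBatchScanner no longer creates its own table, the expected run
order is the writer first, then the scanner (commands follow the runner
convention used throughout these docs):

    $ ./bin/runex client.SequentialBatchWriter
    $ ./bin/runex client.RandomBatchScanner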
diff --git 
a/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java 
b/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
index 79c5e0e..8b3696d 100644
--- a/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
@@ -20,14 +20,16 @@ import java.util.Map.Entry;
 
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.NamespaceExistsException;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOpts;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -35,44 +37,45 @@ import org.slf4j.LoggerFactory;
 public class ReadWriteExample {
 
   private static final Logger log = 
LoggerFactory.getLogger(ReadWriteExample.class);
-  private static final String namespace = "examples";
-  private static final String table = namespace + ".readwrite";
 
-  public static void main(String[] args) throws Exception {
+  private static final String READWRITE_TABLE = Common.NAMESPACE + 
".readwrite";
+
+  private ReadWriteExample() {}
+
+  public static void main(String[] args) throws AccumuloSecurityException, 
AccumuloException {
     ClientOpts opts = new ClientOpts();
     opts.parseArgs(ReadWriteExample.class.getName(), args);
 
     try (AccumuloClient client = 
Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
-
-      try {
-        client.namespaceOperations().create(namespace);
-      } catch (NamespaceExistsException e) {
-        // ignore
-      }
-      try {
-        client.tableOperations().create(table);
-      } catch (TableExistsException e) {
-        // ignore
-      }
-
+      Common.createTableWithNamespace(client, READWRITE_TABLE);
       // write data
-      try (BatchWriter writer = client.createBatchWriter(table)) {
+      try (BatchWriter writer = client.createBatchWriter(READWRITE_TABLE)) {
         for (int i = 0; i < 10; i++) {
           Mutation m = new Mutation("hello" + i);
           m.put("cf", "cq", new Value("world" + i));
           writer.addMutation(m);
         }
+      } catch (TableNotFoundException e) {
+        log.error("Could not find table {}: {}", e.getTableName(), 
e.getMessage());
+        System.exit(1);
       }
 
       // read data
-      try (Scanner scanner = client.createScanner(table, 
Authorizations.EMPTY)) {
+      try (Scanner scanner = client.createScanner(READWRITE_TABLE, 
Authorizations.EMPTY)) {
         for (Entry<Key,Value> entry : scanner) {
-          log.info(entry.getKey().toString() + " -> " + 
entry.getValue().toString());
+          log.info("{} -> {}", entry.getKey().toString(), 
entry.getValue().toString());
         }
+      } catch (TableNotFoundException e) {
+        log.error("Could not find table {}: {}", e.getTableName(), 
e.getMessage());
+        System.exit(1);
       }
 
       // delete table
-      client.tableOperations().delete(table);
+      try {
+        client.tableOperations().delete(READWRITE_TABLE);
+      } catch (TableNotFoundException e) {
+        log.error("Unable to delete table '{}': {}", e.getTableName(), 
e.getMessage());
+      }
     }
   }
 }
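
The updated example can be exercised end to end with the usual runner:

    $ ./bin/runex client.ReadWriteExample

It creates `examples.readwrite` through the shared helper, writes ten rows,
scans them back, and finally deletes the table, exiting with an error if the
table goes missing mid-run.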
diff --git 
a/src/main/java/org/apache/accumulo/examples/client/RowOperations.java 
b/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
index 000bee1..d97e10e 100644
--- a/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
+++ b/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
@@ -24,15 +24,14 @@ import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.NamespaceExistsException;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOpts;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -40,25 +39,27 @@ import org.slf4j.LoggerFactory;
 /**
  * A demonstration of reading entire rows and deleting entire rows.
  */
-public class RowOperations {
+public final class RowOperations {
 
   private static final Logger log = 
LoggerFactory.getLogger(RowOperations.class);
-  private static final String namespace = "examples";
-  private static final String table = namespace + ".rowops";
+
+  static final String ROWOPS_TABLE = Common.NAMESPACE + ".rowops";
+
+  private RowOperations() {}
 
   private static void printAll(AccumuloClient client) throws 
TableNotFoundException {
-    try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(ROWOPS_TABLE, 
Authorizations.EMPTY)) {
       for (Entry<Key,Value> entry : scanner) {
-        log.info("Key: " + entry.getKey().toString() + " Value: " + 
entry.getValue().toString());
+        log.info("Key: {} Value: {}", entry.getKey().toString(), 
entry.getValue().toString());
       }
     }
   }
 
   private static void printRow(String row, AccumuloClient client) throws 
TableNotFoundException {
-    try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(ROWOPS_TABLE, 
Authorizations.EMPTY)) {
       scanner.setRange(Range.exact(row));
       for (Entry<Key,Value> entry : scanner) {
-        log.info("Key: " + entry.getKey().toString() + " Value: " + 
entry.getValue().toString());
+        log.info("Key: {} Value: {}", entry.getKey().toString(), 
entry.getValue().toString());
       }
     }
   }
@@ -66,7 +67,7 @@ public class RowOperations {
   private static void deleteRow(String row, AccumuloClient client, BatchWriter 
bw)
       throws MutationsRejectedException, TableNotFoundException {
     Mutation mut = new Mutation(row);
-    try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
+    try (Scanner scanner = client.createScanner(ROWOPS_TABLE, 
Authorizations.EMPTY)) {
       scanner.setRange(Range.exact(row));
       for (Entry<Key,Value> entry : scanner) {
         mut.putDelete(entry.getKey().getColumnFamily(), 
entry.getKey().getColumnQualifier());
@@ -82,16 +83,7 @@ public class RowOperations {
     opts.parseArgs(RowOperations.class.getName(), args);
 
     try (AccumuloClient client = 
Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
-      try {
-        client.namespaceOperations().create(namespace);
-      } catch (NamespaceExistsException e) {
-        // ignore
-      }
-      try {
-        client.tableOperations().create(table);
-      } catch (TableExistsException e) {
-        // ignore
-      }
+      Common.createTableWithNamespace(client, ROWOPS_TABLE);
 
       // lets create 3 rows of information
       Mutation mut1 = new Mutation("row1");
@@ -111,7 +103,7 @@ public class RowOperations {
       mut3.put("col", "3", "v3");
 
       // Now we'll make a Batch Writer
-      try (BatchWriter bw = client.createBatchWriter(table)) {
+      try (BatchWriter bw = client.createBatchWriter(ROWOPS_TABLE)) {
 
         // And add the mutations
         bw.addMutation(mut1);
@@ -138,7 +130,7 @@ public class RowOperations {
       log.info("This is just row3");
       printAll(client);
 
-      client.tableOperations().delete(table);
+      client.tableOperations().delete(ROWOPS_TABLE);
     }
   }
 }
diff --git 
a/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java 
b/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
index ea8ca2d..d8a575b 100644
--- 
a/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
+++ 
b/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
@@ -23,10 +23,10 @@ import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOpts;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -36,13 +36,17 @@ import com.beust.jcommander.Parameter;
 /**
  * Simple example for writing random data in sequential order to Accumulo.
  */
-public class SequentialBatchWriter {
+public final class SequentialBatchWriter {
 
   private static final Logger log = 
LoggerFactory.getLogger(SequentialBatchWriter.class);
 
+  static final String BATCH_TABLE = Common.NAMESPACE + ".batch";
+
+  private SequentialBatchWriter() {}
+
   public static Value createValue(long rowId, int size) {
     Random r = new Random(rowId);
-    byte value[] = new byte[size];
+    byte[] value = new byte[size];
 
     r.nextBytes(value);
 
@@ -56,7 +60,7 @@ public class SequentialBatchWriter {
 
   static class Opts extends ClientOpts {
     @Parameter(names = {"-t"}, description = "table to use")
-    public String tableName = "batch";
+    public String tableName = BATCH_TABLE;
 
     @Parameter(names = {"--start"}, description = "starting row")
     public Integer start = 0;
@@ -79,12 +83,7 @@ public class SequentialBatchWriter {
     opts.parseArgs(SequentialBatchWriter.class.getName(), args);
 
     try (AccumuloClient client = 
Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
-      try {
-        client.tableOperations().create(opts.tableName);
-      } catch (TableExistsException e) {
-        // ignore
-      }
-
+      Common.createTableWithNamespace(client, opts.tableName);
       try (BatchWriter bw = client.createBatchWriter(opts.tableName)) {
         for (int i = 0; i < opts.num; i++) {
           int row = i + opts.start;
diff --git 
a/src/main/java/org/apache/accumulo/examples/client/TracingExample.java 
b/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
index 899008c..551b43c 100644
--- a/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
@@ -33,6 +33,7 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 // import org.apache.accumulo.core.trace.DistributedTrace;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOnDefaultTable;
 import org.apache.accumulo.examples.cli.ScannerOpts;
 import org.apache.htrace.Sampler;
@@ -81,7 +82,7 @@ public class TracingExample {
       AccumuloSecurityException, TableExistsException {
 
     if (opts.createtable) {
-      client.tableOperations().create(opts.getTableName());
+      Common.createTableWithNamespace(client, opts.getTableName());
     }
 
     if (opts.createEntries) {
diff --git 
a/src/main/java/org/apache/accumulo/examples/combiner/StatsCombiner.java 
b/src/main/java/org/apache/accumulo/examples/combiner/StatsCombiner.java
index 2794c22..14b24ca 100644
--- a/src/main/java/org/apache/accumulo/examples/combiner/StatsCombiner.java
+++ b/src/main/java/org/apache/accumulo/examples/combiner/StatsCombiner.java
@@ -49,7 +49,7 @@ public class StatsCombiner extends Combiner {
     long count = 0;
 
     while (iter.hasNext()) {
-      String stats[] = iter.next().toString().split(",");
+      String[] stats = iter.next().toString().split(",");
 
       if (stats.length == 1) {
         long val = Long.parseLong(stats[0], radix);
diff --git 
a/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java
 
b/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java
index 0ad791e..321031c 100644
--- 
a/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java
+++ 
b/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java
@@ -28,19 +28,23 @@ import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.constraints.Constraint;
 import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.constraints.Constraint;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOpts;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is an accumulo constraint that ensures all fields of a key are 
alpha numeric.
  */
 public class AlphaNumKeyConstraint implements Constraint {
 
+  private static final Logger log = 
LoggerFactory.getLogger(AlphaNumKeyConstraint.class);
+
   static final short NON_ALPHA_NUM_ROW = 1;
   static final short NON_ALPHA_NUM_COLF = 2;
   static final short NON_ALPHA_NUM_COLQ = 3;
@@ -49,23 +53,20 @@ public class AlphaNumKeyConstraint implements Constraint {
   static final String COLF_VIOLATION_MESSAGE = "Column family was not alpha 
numeric";
   static final String COLQ_VIOLATION_MESSAGE = "Column qualifier was not alpha 
numeric";
 
-  private boolean isAlphaNum(byte bytes[]) {
+  private boolean isNotAlphaNum(byte[] bytes) {
     for (byte b : bytes) {
       boolean ok = ((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= 
'0' && b <= '9'));
       if (!ok)
-        return false;
+        return true;
     }
-
-    return true;
+    return false;
   }
 
   private Set<Short> addViolation(Set<Short> violations, short violation) {
     if (violations == null) {
       violations = new LinkedHashSet<>();
-      violations.add(violation);
-    } else if (!violations.contains(violation)) {
-      violations.add(violation);
     }
+    violations.add(violation);
     return violations;
   }
 
@@ -73,15 +74,15 @@ public class AlphaNumKeyConstraint implements Constraint {
   public List<Short> check(Environment env, Mutation mutation) {
     Set<Short> violations = null;
 
-    if (!isAlphaNum(mutation.getRow()))
+    if (isNotAlphaNum(mutation.getRow()))
       violations = addViolation(violations, NON_ALPHA_NUM_ROW);
 
     Collection<ColumnUpdate> updates = mutation.getUpdates();
     for (ColumnUpdate columnUpdate : updates) {
-      if (!isAlphaNum(columnUpdate.getColumnFamily()))
+      if (isNotAlphaNum(columnUpdate.getColumnFamily()))
         violations = addViolation(violations, NON_ALPHA_NUM_COLF);
 
-      if (!isAlphaNum(columnUpdate.getColumnQualifier()))
+      if (isNotAlphaNum(columnUpdate.getColumnQualifier()))
         violations = addViolation(violations, NON_ALPHA_NUM_COLQ);
     }
 
@@ -109,30 +110,26 @@ public class AlphaNumKeyConstraint implements Constraint {
     opts.parseArgs(AlphaNumKeyConstraint.class.getName(), args);
 
     try (AccumuloClient client = 
Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
-      try {
-        client.tableOperations().create("testConstraints");
-      } catch (TableExistsException e) {
-        // ignore
-      }
+      Common.createTableWithNamespace(client, 
ConstraintsCommon.CONSTRAINTS_TABLE);
 
-      /**
+      /*
        * Add the {@link AlphaNumKeyConstraint} to the table. Be sure to use 
the fully qualified
        * class name.
        */
-      int num = client.tableOperations().addConstraint("testConstraints",
+      int num = 
client.tableOperations().addConstraint(ConstraintsCommon.CONSTRAINTS_TABLE,
           "org.apache.accumulo.examples.constraints.AlphaNumKeyConstraint");
 
-      System.out.println("Attempting to write non alpha numeric data to 
testConstraints");
-      try (BatchWriter bw = client.createBatchWriter("testConstraints")) {
+      log.info("Attempting to write non alpha numeric data to 
testConstraints");
+      try (BatchWriter bw = 
client.createBatchWriter(ConstraintsCommon.CONSTRAINTS_TABLE)) {
         Mutation m = new Mutation("r1--$$@@%%");
         m.put("cf1", "cq1", new Value(("value1").getBytes()));
         bw.addMutation(m);
       } catch (MutationsRejectedException e) {
-        e.getConstraintViolationSummaries().forEach(violationSummary -> 
System.out
-            .println("Constraint violated: " + 
violationSummary.constrainClass));
+        e.getConstraintViolationSummaries().forEach(violationSummary -> log
+            .error(ConstraintsCommon.CONSTRAINT_VIOLATED_MSG, 
violationSummary.constrainClass));
       }
-
-      client.tableOperations().removeConstraint("testConstraints", num);
+      
client.tableOperations().removeConstraint(ConstraintsCommon.CONSTRAINTS_TABLE, 
num);
     }
   }
+
 }
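
As with the other constraint examples, the class has a main method and can be
run directly (runner convention assumed from the other docs):

    $ ./bin/runex constraints.AlphaNumKeyConstraint

The run adds the constraint to examples.testConstraints, attempts a mutation
with a non-alphanumeric row id, logs the resulting violation, and removes the
constraint again.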
diff --git 
a/src/main/java/org/apache/accumulo/examples/constraints/ConstraintsCommon.java 
b/src/main/java/org/apache/accumulo/examples/constraints/ConstraintsCommon.java
new file mode 100644
index 0000000..1ff579d
--- /dev/null
+++ 
b/src/main/java/org/apache/accumulo/examples/constraints/ConstraintsCommon.java
@@ -0,0 +1,9 @@
+package org.apache.accumulo.examples.constraints;
+
+import org.apache.accumulo.examples.Common;
+
+public enum ConstraintsCommon {
+  ;
+  public static final String CONSTRAINTS_TABLE = Common.NAMESPACE + 
".testConstraints";
+  public static final String CONSTRAINT_VIOLATED_MSG = "Constraint violated: 
{}";
+}
diff --git 
a/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java 
b/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java
index c98e0d7..81e4694 100644
--- 
a/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java
+++ 
b/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java
@@ -25,20 +25,25 @@ import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.constraints.Constraint;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.constraints.Constraint;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOpts;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Ensure that mutations are a reasonable size: we must be able to fit several 
in memory at a time.
  */
 public class MaxMutationSize implements Constraint {
+
+  private static final Logger log = 
LoggerFactory.getLogger(MaxMutationSize.class);
+
   static final long MAX_SIZE = Runtime.getRuntime().maxMemory() >> 8;
   static final List<Short> empty = Collections.emptyList();
-  static final List<Short> violations = 
Collections.singletonList(Short.valueOf((short) 0));
+  static final List<Short> violations = Collections.singletonList((short) 0);
 
   @Override
   public String getViolationDescription(short violationCode) {
@@ -58,31 +63,26 @@ public class MaxMutationSize implements Constraint {
     opts.parseArgs(MaxMutationSize.class.getName(), args);
 
     try (AccumuloClient client = 
Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
-      try {
-        client.tableOperations().create("testConstraints");
-      } catch (TableExistsException e) {
-        // ignore
-      }
+      Common.createTableWithNamespace(client, 
ConstraintsCommon.CONSTRAINTS_TABLE);
 
-      /**
+      /*
        * Add the {@link MaxMutationSize} constraint to the table. Be sure to 
use the fully qualified
        * class name
        */
-      int num = client.tableOperations().addConstraint("testConstraints",
+      int num = 
client.tableOperations().addConstraint(ConstraintsCommon.CONSTRAINTS_TABLE,
           "org.apache.accumulo.examples.constraints.MaxMutationSize");
 
-      System.out.println("Attempting to write a lot of mutations to 
testConstraints");
-      try (BatchWriter bw = client.createBatchWriter("testConstraints")) {
+      log.info("Attempting to write a lot of mutations to testConstraints");
+      try (BatchWriter bw = client.createBatchWriter(ConstraintsCommon.CONSTRAINTS_TABLE)) {
         Mutation m = new Mutation("r1");
         for (int i = 0; i < 1_000_000; i++)
           m.put("cf" + i % 5000, "cq" + i, new Value(("value" + 
i).getBytes()));
         bw.addMutation(m);
       } catch (MutationsRejectedException e) {
         e.getConstraintViolationSummaries()
-            .forEach(m -> System.out.println("Constraint violated: " + m.constrainClass));
+            .forEach(m -> log.error(ConstraintsCommon.CONSTRAINT_VIOLATED_MSG, m.constrainClass));
       }
-
-      client.tableOperations().removeConstraint("testConstraints", num);
+      client.tableOperations().removeConstraint(ConstraintsCommon.CONSTRAINTS_TABLE, num);
     }
   }
 }
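
The check() method of MaxMutationSize sits outside the hunks above. For readers following the constraint flow, a size check of this shape would be roughly as follows, using the fields declared above (an illustrative sketch, not the committed body):

    @Override
    public List<Short> check(Environment env, Mutation mutation) {
      // Illustrative sketch; the committed check() may differ.
      // Reject any mutation whose estimated in-memory size reaches MAX_SIZE;
      // violation code 0 maps to getViolationDescription above.
      if (mutation.numBytes() < MAX_SIZE)
        return empty;
      return violations;
    }
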
diff --git a/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java b/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java
index ca5a4d3..fe9d121 100644
--- a/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java
+++ b/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java
@@ -16,9 +16,7 @@
  */
 package org.apache.accumulo.examples.constraints;
 
-import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 
 import org.apache.accumulo.core.client.Accumulo;
@@ -27,32 +25,34 @@ import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.constraints.Constraint;
 import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.constraints.Constraint;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOpts;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is an accumulo constraint that ensures values are numeric strings.
  */
 public class NumericValueConstraint implements Constraint {
 
+  private static final Logger log = LoggerFactory.getLogger(NumericValueConstraint.class);
+
   static final short NON_NUMERIC_VALUE = 1;
   static final String VIOLATION_MESSAGE = "Value is not numeric";
 
-  private static final List<Short> VIOLATION_LIST = Collections
-      .unmodifiableList(Arrays.asList(NON_NUMERIC_VALUE));
+  private static final List<Short> VIOLATION_LIST = List.of(NON_NUMERIC_VALUE);
 
-  private boolean isNumeric(byte bytes[]) {
+  private boolean isNumeric(byte[] bytes) {
     for (byte b : bytes) {
       boolean ok = (b >= '0' && b <= '9');
       if (!ok)
         return false;
     }
-
     return true;
   }
 
@@ -64,18 +64,14 @@ public class NumericValueConstraint implements Constraint {
       if (!isNumeric(columnUpdate.getValue()))
         return VIOLATION_LIST;
     }
-
     return null;
   }
 
   @Override
   public String getViolationDescription(short violationCode) {
-
-    switch (violationCode) {
-      case NON_NUMERIC_VALUE:
-        return "Value is not numeric";
+    if (violationCode == NON_NUMERIC_VALUE) {
+      return VIOLATION_MESSAGE;
     }
-
     return null;
   }
 
@@ -85,30 +81,25 @@ public class NumericValueConstraint implements Constraint {
     opts.parseArgs(NumericValueConstraint.class.getName(), args);
 
     try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
-      try {
-        client.tableOperations().create("testConstraints");
-      } catch (TableExistsException e) {
-        // ignore
-      }
+      Common.createTableWithNamespace(client, ConstraintsCommon.CONSTRAINTS_TABLE);
 
-      /**
+      /*
       * Add the {@link NumericValueConstraint} constraint to the table. Be sure to use the fully
        * qualified class name
        */
-      int num = client.tableOperations().addConstraint("testConstraints",
+      int num = client.tableOperations().addConstraint(ConstraintsCommon.CONSTRAINTS_TABLE,
           "org.apache.accumulo.examples.constraints.NumericValueConstraint");
 
-      System.out.println("Attempting to write non numeric data to 
testConstraints");
-      try (BatchWriter bw = client.createBatchWriter("testConstraints")) {
+      log.info("Attempting to write non-numeric data to testConstraints");
+      try (BatchWriter bw = client.createBatchWriter(ConstraintsCommon.CONSTRAINTS_TABLE)) {
         Mutation m = new Mutation("r1");
         m.put("cf1", "cq1", new Value(("value1--$$@@%%").getBytes()));
         bw.addMutation(m);
       } catch (MutationsRejectedException e) {
         e.getConstraintViolationSummaries()
-            .forEach(m -> System.out.println("Constraint violated: " + m.constrainClass));
+            .forEach(m -> log.error(ConstraintsCommon.CONSTRAINT_VIOLATED_MSG, m.constrainClass));
       }
-
-      client.tableOperations().removeConstraint("testConstraints", num);
+      client.tableOperations().removeConstraint(ConstraintsCommon.CONSTRAINTS_TABLE, num);
     }
   }
 }
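
To exercise either constraint example end to end, the repository's runex launcher can presumably be used (command shape assumed here, not shown in this commit):

    ./bin/runex constraints.NumericValueConstraint

The bad write is then rejected, and the catch block above logs each ConstraintViolationSummary through the shared CONSTRAINT_VIOLATED_MSG format.
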
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java b/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
index 24eab3b..d5f3b35 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
@@ -29,11 +29,14 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.LongCombiner;
 import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.BatchWriterOpts;
 import org.apache.accumulo.examples.cli.ClientOpts;
 import org.apache.accumulo.examples.filedata.ChunkCombiner;
 import org.apache.accumulo.examples.filedata.FileDataIngest;
 import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.beust.jcommander.Parameter;
 
@@ -42,7 +45,14 @@ import com.beust.jcommander.Parameter;
  * into one Accumulo table, indexes the file names in a separate table, and the file data into a
  * third table.
  */
-public class Ingest {
+public final class Ingest {
+
+  private static final Logger log = LoggerFactory.getLogger(Ingest.class);
+
+  static final String DIR_TABLE = Common.NAMESPACE + ".dirTable";
+  static final String INDEX_TABLE = Common.NAMESPACE + ".indexTable";
+  static final String DATA_TABLE = Common.NAMESPACE + ".dataTable";
+
   static final Value nullValue = new Value(new byte[0]);
   public static final String LENGTH_CQ = "length";
   public static final String HIDDEN_CQ = "hidden";
@@ -51,6 +61,8 @@ public class Ingest {
   public static final String HASH_CQ = "md5";
   public static final Encoder<Long> encoder = LongCombiner.FIXED_LEN_ENCODER;
 
+  private Ingest() {}
+
   public static Mutation buildMutation(ColumnVisibility cv, String path, boolean isDir,
       boolean isHidden, boolean canExec, long length, long lastmod, String hash) {
     if (path.equals("/"))
@@ -73,13 +85,13 @@ public class Ingest {
   private static void ingest(File src, ColumnVisibility cv, BatchWriter dirBW, BatchWriter indexBW,
       FileDataIngest fdi, BatchWriter data) throws Exception {
     // build main table entry
-    String path = null;
+    String path;
     try {
       path = src.getCanonicalPath();
     } catch (IOException e) {
       path = src.getAbsolutePath();
     }
-    System.out.println(path);
+    log.info(path);
 
     String hash = null;
     if (!src.isDirectory()) {
@@ -126,11 +138,11 @@ public class Ingest {
 
   static class Opts extends ClientOpts {
     @Parameter(names = "--dirTable", description = "a table to hold the 
directory information")
-    String nameTable = "dirTable";
+    String dirTable = DIR_TABLE;
     @Parameter(names = "--indexTable", description = "an index over the 
ingested data")
-    String indexTable = "indexTable";
+    String indexTable = INDEX_TABLE;
     @Parameter(names = "--dataTable", description = "the file data, chunked 
into parts")
-    String dataTable = "dataTable";
+    String dataTable = DATA_TABLE;
     @Parameter(names = "--vis", description = "the visibility to mark the 
data",
         converter = VisibilityConverter.class)
     ColumnVisibility visibility = new ColumnVisibility();
@@ -146,17 +158,13 @@ public class Ingest {
     opts.parseArgs(Ingest.class.getName(), args, bwOpts);
 
     try (AccumuloClient client = opts.createAccumuloClient()) {
-      if (!client.tableOperations().exists(opts.nameTable))
-        client.tableOperations().create(opts.nameTable);
-      if (!client.tableOperations().exists(opts.indexTable))
-        client.tableOperations().create(opts.indexTable);
-      if (!client.tableOperations().exists(opts.dataTable)) {
-        client.tableOperations().create(opts.dataTable);
-        client.tableOperations().attachIterator(opts.dataTable,
-            new IteratorSetting(1, ChunkCombiner.class));
-      }
+      Common.createTableWithNamespace(client, opts.dirTable);
+      Common.createTableWithNamespace(client, opts.indexTable);
+      Common.createTableWithNamespace(client, opts.dataTable);
+      client.tableOperations().attachIterator(opts.dataTable,
+          new IteratorSetting(1, ChunkCombiner.class));
 
-      BatchWriter dirBW = client.createBatchWriter(opts.nameTable, bwOpts.getBatchWriterConfig());
+      BatchWriter dirBW = client.createBatchWriter(opts.dirTable, bwOpts.getBatchWriterConfig());
       BatchWriter indexBW = client.createBatchWriter(opts.indexTable,
           bwOpts.getBatchWriterConfig());
       BatchWriter dataBW = client.createBatchWriter(opts.dataTable, bwOpts.getBatchWriterConfig());
@@ -165,8 +173,8 @@ public class Ingest {
         recurse(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
 
         // fill in parent directory info
-        int slashIndex = -1;
-        while ((slashIndex = dir.lastIndexOf("/")) > 0) {
+        int slashIndex;
+        while ((slashIndex = dir.lastIndexOf('/')) > 0) {
           dir = dir.substring(0, slashIndex);
           ingest(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
         }
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java b/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java
index 5928786..e83bed2 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java
@@ -23,7 +23,6 @@ import java.util.List;
 import java.util.Map.Entry;
 import java.util.Properties;
 
-import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java b/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
index 5095ab6..d649ac8 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
@@ -33,9 +33,12 @@ import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.BatchWriterOpts;
 import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
 import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.beust.jcommander.Parameter;
 
@@ -43,6 +46,9 @@ import com.beust.jcommander.Parameter;
  * Takes a list of files and archives them into Accumulo keyed on hashes of the files.
  */
 public class FileDataIngest {
+
+  private static final Logger log = LoggerFactory.getLogger(FileDataIngest.class);
+
   public static final Text CHUNK_CF = new Text("~chunk");
   public static final Text REFS_CF = new Text("refs");
   public static final String REFS_ORIG_FILE = "name";
@@ -52,6 +58,9 @@ public class FileDataIngest {
   public static final ByteSequence REFS_CF_BS = new ArrayByteSequence(REFS_CF.getBytes(), 0,
       REFS_CF.getLength());
 
+  public static final String TABLE_EXISTS_MSG = "Table already exists. User may wish to delete existing "
+      + "table and re-run example. Table name: ";
+
   int chunkSize;
   byte[] chunkSizeBytes;
   byte[] buf;
@@ -194,11 +203,10 @@ public class FileDataIngest {
     opts.parseArgs(FileDataIngest.class.getName(), args, bwOpts);
 
     try (AccumuloClient client = opts.createAccumuloClient()) {
-      if (!client.tableOperations().exists(opts.getTableName())) {
-        client.tableOperations().create(opts.getTableName());
-        client.tableOperations().attachIterator(opts.getTableName(),
-            new IteratorSetting(1, ChunkCombiner.class));
-      }
+      Common.createTableWithNamespace(client, opts.getTableName());
+      client.tableOperations().attachIterator(opts.getTableName(),
+          new IteratorSetting(1, ChunkCombiner.class));
+
       try (BatchWriter bw = client.createBatchWriter(opts.getTableName(),
           bwOpts.getBatchWriterConfig())) {
         FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
diff --git a/src/main/java/org/apache/accumulo/examples/helloworld/Insert.java b/src/main/java/org/apache/accumulo/examples/helloworld/Insert.java
index 3ee8864..a7884be 100644
--- a/src/main/java/org/apache/accumulo/examples/helloworld/Insert.java
+++ b/src/main/java/org/apache/accumulo/examples/helloworld/Insert.java
@@ -21,10 +21,10 @@ import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOpts;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -36,19 +36,17 @@ public class Insert {
 
   private static final Logger log = LoggerFactory.getLogger(Insert.class);
 
+  static final String HELLO_TABLE = Common.NAMESPACE + ".hellotable";
+
   public static void main(String[] args)
       throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     ClientOpts opts = new ClientOpts();
     opts.parseArgs(Insert.class.getName(), args);
 
     try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
-      try {
-        client.tableOperations().create("hellotable");
-      } catch (TableExistsException e) {
-        // ignore
-      }
+      Common.createTableWithNamespace(client, HELLO_TABLE);
 
-      try (BatchWriter bw = client.createBatchWriter("hellotable")) {
+      try (BatchWriter bw = client.createBatchWriter(HELLO_TABLE)) {
         log.trace("writing ...");
         for (int i = 0; i < 10000; i++) {
           Mutation m = new Mutation(String.format("row_%d", i));
diff --git a/src/main/java/org/apache/accumulo/examples/helloworld/Read.java b/src/main/java/org/apache/accumulo/examples/helloworld/Read.java
index 41400d0..e9e82b7 100644
--- a/src/main/java/org/apache/accumulo/examples/helloworld/Read.java
+++ b/src/main/java/org/apache/accumulo/examples/helloworld/Read.java
@@ -42,13 +42,14 @@ public class Read {
     opts.parseArgs(Read.class.getName(), args);
 
     try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build();
-        Scanner scan = client.createScanner("hellotable", Authorizations.EMPTY)) {
+        Scanner scan = client.createScanner(Insert.HELLO_TABLE, Authorizations.EMPTY)) {
       scan.setRange(new Range(new Key("row_0"), new Key("row_1002")));
       for (Entry<Key,Value> e : scan) {
         Key key = e.getKey();
         log.trace(key.getRow() + " " + key.getColumnFamily() + " " + key.getColumnQualifier() + " "
             + e.getValue());
       }
+      log.info("Scan complete");
     }
   }
 }
diff --git a/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java b/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java
index 6de2929..61cea3b 100644
--- a/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java
+++ b/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java
@@ -28,6 +28,7 @@ import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.BatchWriterOpts;
 import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
 import org.apache.hadoop.io.Text;
@@ -46,13 +47,19 @@ import com.beust.jcommander.Parameter;
  *
  */
 
-public class InterferenceTest {
+public final class InterferenceTest {
 
   private static final int NUM_ROWS = 500;
   private static final int NUM_COLUMNS = 113; // scanner batches 1000 by default, so make num
                                               // columns not a multiple of 10
+  private static final String ERROR_MISSING_COLS = "ERROR Did not see {} columns in row {}";
+  private static final String ERROR_MULTIPLE_VALS = "ERROR Columns in row {} had multiple values "
+      + "{}";
+
   private static final Logger log = LoggerFactory.getLogger(InterferenceTest.class);
 
+  private InterferenceTest() {}
+
   static class Writer implements Runnable {
 
     private final BatchWriter bw;
@@ -95,7 +102,7 @@ public class InterferenceTest {
 
   static class Reader implements Runnable {
 
-    private Scanner scanner;
+    private final Scanner scanner;
     volatile boolean stop = false;
 
     Reader(Scanner scanner) {
@@ -118,10 +125,10 @@ public class InterferenceTest {
 
           if (!row.equals(entry.getKey().getRowData())) {
             if (count != NUM_COLUMNS)
-              System.err.println("ERROR Did not see " + NUM_COLUMNS + " 
columns in row " + row);
+              log.error(ERROR_MISSING_COLS, NUM_COLUMNS, row);
 
             if (values.size() > 1)
-              System.err.println("ERROR Columns in row " + row + " had 
multiple values " + values);
+              log.error(ERROR_MULTIPLE_VALS, row, values);
 
             row = entry.getKey().getRowData();
             count = 0;
@@ -134,10 +141,10 @@ public class InterferenceTest {
         }
 
         if (count > 0 && count != NUM_COLUMNS)
-          System.err.println("ERROR Did not see " + NUM_COLUMNS + " columns in 
row " + row);
+          log.error(ERROR_MISSING_COLS, NUM_COLUMNS, row);
 
         if (values.size() > 1)
-          System.err.println("ERROR Columns in row " + row + " had multiple 
values " + values);
+          log.error(ERROR_MULTIPLE_VALS, row, values);
       }
     }
 
@@ -162,9 +169,7 @@ public class InterferenceTest {
       opts.iterations = Long.MAX_VALUE;
 
     try (AccumuloClient client = opts.createAccumuloClient()) {
-      if (!client.tableOperations().exists(opts.getTableName()))
-        client.tableOperations().create(opts.getTableName());
-
+      Common.createTableWithNamespace(client, opts.getTableName());
       Thread writer = new Thread(
           new Writer(client.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig()),
               opts.iterations));
@@ -180,7 +185,7 @@ public class InterferenceTest {
       writer.join();
       r.stopNow();
       reader.join();
-      System.out.println("finished");
+      log.info("finished");
     }
   }
 }
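
For readers of this example: the Reader's multiple-values check is exactly what scan isolation is meant to rule out. With the standard client API, an isolated view is obtained by wrapping a regular scanner; this is a usage sketch, and the example's own wiring of the option sits outside the hunks above:

    // Row-isolated view: each row is served from a single snapshot, so a
    // reader never observes a row halfway through a concurrent update.
    Scanner isolated = new IsolatedScanner(
        client.createScanner(opts.getTableName(), Authorizations.EMPTY));
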
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java b/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java
index 23937f6..2755858 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java
@@ -23,6 +23,7 @@ import java.util.TreeSet;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOpts;
 import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
 import org.apache.hadoop.fs.Path;
@@ -89,7 +90,7 @@ public class NGramIngest {
     try (AccumuloClient client = opts.createAccumuloClient()) {
       if (!client.tableOperations().exists(opts.tableName)) {
         log.info("Creating table " + opts.tableName);
-        client.tableOperations().create(opts.tableName);
+        Common.createTableWithNamespace(client, opts.tableName);
         SortedSet<Text> splits = new TreeSet<>();
         String numbers[] = "1 2 3 4 5 6 7 8 9".split("\\s");
         String lower[] = "a b c d e f g h i j k l m n o p q r s t u v w x y z".split("\\s");
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/WordCount.java b/src/main/java/org/apache/accumulo/examples/mapreduce/WordCount.java
index 1864fe3..d86be5a 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/WordCount.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/WordCount.java
@@ -23,9 +23,9 @@ import java.util.Date;
 
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.iterators.user.SummingCombiner;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOpts;
 import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
 import org.apache.hadoop.fs.Path;
@@ -42,13 +42,15 @@ import com.beust.jcommander.Parameter;
 /**
  * A simple MapReduce job that inserts word counts into Accumulo. See docs/mapred.md
  */
-public class WordCount {
+public final class WordCount {
 
   private static final Logger log = LoggerFactory.getLogger(WordCount.class);
 
+  private WordCount() {}
+
   static class Opts extends ClientOpts {
     @Parameter(names = {"-t", "--table"}, description = "Name of output 
Accumulo table")
-    String tableName = "wordCount";
+    String tableName = Common.NAMESPACE + ".wordcount";
     @Parameter(names = {"-i", "--input"}, required = true, description = "HDFS 
input directory")
     String inputDirectory;
     @Parameter(names = {"-d", "--dfsPath"},
@@ -81,14 +83,12 @@ public class WordCount {
 
     // Create Accumulo table and attach Summing iterator
     try (AccumuloClient client = opts.createAccumuloClient()) {
-      client.tableOperations().create(opts.tableName);
+      Common.createTableWithNamespace(client, opts.tableName);
       IteratorSetting is = new IteratorSetting(10, SummingCombiner.class);
       SummingCombiner.setColumns(is,
           Collections.singletonList(new IteratorSetting.Column("count")));
       SummingCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
       client.tableOperations().attachIterator(opts.tableName, is);
-    } catch (TableExistsException e) {
-      // ignore
     }
 
     // Create M/R job
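
With the SummingCombiner attached at priority 10 on the "count" column, the aggregated counts can be read back with a plain scan once the job finishes. A hedged sketch, assuming the default examples.wordcount table from Opts above and a client and log already in scope:

    try (Scanner scanner = client.createScanner(Common.NAMESPACE + ".wordcount",
        Authorizations.EMPTY)) {
      for (Map.Entry<Key,Value> entry : scanner) {
        // The row is the word; the combiner has summed all "count" cells.
        log.info("{} occurs {} times", entry.getKey().getRow(), entry.getValue());
      }
    }
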
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java
index 6b27309..0225731 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java
@@ -37,12 +37,17 @@ import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.Reducer;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Example map reduce job that bulk ingest data into an accumulo table. The expected input is text
  * files containing tab separated key value pairs on each line.
  */
 public final class BulkIngestExample {
+
+  private static final Logger log = LoggerFactory.getLogger(BulkIngestExample.class);
+
   static final String workDir = "tmp/bulkWork";
   static final String inputDir = "bulk";
   static final String outputFile = "bulk/test_1.txt";
@@ -141,7 +146,8 @@ public final class BulkIngestExample {
 
       try (PrintStream out = new PrintStream(
           new BufferedOutputStream(fs.create(new Path(workDir + SPLITS_TXT))))) {
-        Collection<Text> splits = client.tableOperations().listSplits(SetupTable.tableName, 100);
+        Collection<Text> splits = client.tableOperations().listSplits(SetupTable.BULK_INGEST_TABLE,
+            100);
         for (Text split : splits)
           out.println(Base64.getEncoder().encodeToString(split.copyBytes()));
         job.setNumReduceTasks(splits.size() + 1);
@@ -158,9 +164,9 @@ public final class BulkIngestExample {
       FsShell fsShell = new FsShell(opts.getHadoopConfig());
       fsShell.run(new String[] {"-chmod", "-R", "777", workDir});
       System.err.println("Importing Directory '" + workDir + SLASH_FILES + "' 
to table '"
-          + SetupTable.tableName + "'");
-      client.tableOperations().importDirectory(workDir + SLASH_FILES).to(SetupTable.tableName)
-          .load();
+          + SetupTable.BULK_INGEST_TABLE + "'");
+      client.tableOperations().importDirectory(workDir + SLASH_FILES)
+          .to(SetupTable.BULK_INGEST_TABLE).load();
     }
     return job.isSuccessful() ? 0 : 1;
   }
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java
index ef4edb3..259f77b 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java
@@ -20,34 +20,35 @@ import java.util.TreeSet;
 
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOpts;
 import org.apache.hadoop.io.Text;
 
 public final class SetupTable {
 
-  static final String[] splits = {"row_00000333", "row_00000666"};
-  static final String tableName = "test_bulk";
+  static final String BULK_INGEST_TABLE = Common.NAMESPACE + ".test_bulk";
 
   private SetupTable() {}
 
-  public static void main(String[] args) throws Exception {
+  public static void main(String[] args)
+      throws AccumuloSecurityException, TableNotFoundException, AccumuloException {
+
+    final String[] splits = {"row_00000333", "row_00000666"};
     ClientOpts opts = new ClientOpts();
     opts.parseArgs(SetupTable.class.getName(), args);
 
     try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
-      try {
-        client.tableOperations().create(tableName);
-      } catch (TableExistsException e) {
-        // ignore
-      }
+      Common.createTableWithNamespace(client, BULK_INGEST_TABLE);
 
       // create a table with initial partitions
       TreeSet<Text> initialPartitions = new TreeSet<>();
       for (String split : splits) {
         initialPartitions.add(new Text(split));
       }
-      client.tableOperations().addSplits(tableName, initialPartitions);
+      client.tableOperations().addSplits(BULK_INGEST_TABLE, initialPartitions);
     }
   }
 }
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java
index dc354b2..3e6bf0c 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java
@@ -45,7 +45,8 @@ public final class VerifyIngest {
     opts.parseArgs(VerifyIngest.class.getName(), args);
 
     try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build();
-        Scanner scanner = client.createScanner(SetupTable.tableName, Authorizations.EMPTY)) {
+        Scanner scanner = client.createScanner(SetupTable.BULK_INGEST_TABLE,
+            Authorizations.EMPTY)) {
 
       scanner.setRange(new Range(String.format(ROW_FORMAT, 0), null));
 
@@ -59,30 +60,30 @@ public final class VerifyIngest {
           Entry<Key,Value> entry = si.next();
 
           if (!entry.getKey().getRow().toString().equals(String.format(ROW_FORMAT, i))) {
-            log.error("unexpected row key {}; expected {}", 
entry.getKey().getRow(),
-                String.format(ROW_FORMAT, i));
+            String formattedRow = String.format(ROW_FORMAT, i);
+            log.error("unexpected row key {}; expected {}", 
entry.getKey().getRow(), formattedRow);
             ok = false;
           }
 
           if (!entry.getValue().toString().equals(String.format(VALUE_FORMAT, i))) {
-            log.error("unexpected value {}; expected {}", entry.getValue(),
-                String.format(VALUE_FORMAT, i));
+            var formattedValue = String.format(VALUE_FORMAT, i);
+            log.error("unexpected value {}; expected {}", entry.getValue(), 
formattedValue);
             ok = false;
           }
 
         } else {
-          log.error("no more rows, expected {}", String.format(ROW_FORMAT, i));
+          var formattedRow = String.format(ROW_FORMAT, i);
+          log.error("no more rows, expected {}", formattedRow);
           ok = false;
           break;
         }
-
       }
 
       if (ok) {
-        System.out.println("Data verification succeeded!");
+        log.info("Data verification succeeded!");
         System.exit(0);
       } else {
-        System.out.println("Data verification failed!");
+        log.info("Data verification failed!");
         System.exit(1);
       }
     }
diff --git a/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java b/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java
index 5447e28..304f33a 100644
--- a/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java
+++ b/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java
@@ -24,6 +24,7 @@ import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.SampleNotPresentException;
 import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.admin.CompactionConfig;
 import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
 import org.apache.accumulo.core.client.sample.RowSampler;
@@ -36,6 +37,8 @@ import org.apache.accumulo.examples.cli.BatchWriterOpts;
 import org.apache.accumulo.examples.cli.ClientOnDefaultTable;
 import org.apache.accumulo.examples.client.RandomBatchWriter;
 import org.apache.accumulo.examples.shard.CutoffIntersectingIterator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
 
@@ -46,6 +49,8 @@ import com.google.common.collect.ImmutableMap;
  */
 public class SampleExample {
 
+  private static final Logger log = LoggerFactory.getLogger(SampleExample.class);
+
   // a compaction strategy that only selects files for compaction that have no sample data or sample
   // data created in a different way than the tables
   static final CompactionStrategyConfig NO_SAMPLE_STRATEGY = new CompactionStrategyConfig(
@@ -54,7 +59,7 @@ public class SampleExample {
 
   static class Opts extends ClientOnDefaultTable {
     public Opts() {
-      super("sampex");
+      super("examples.sampex");
     }
   }
 
@@ -64,11 +69,11 @@ public class SampleExample {
     opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
 
     try (AccumuloClient client = opts.createAccumuloClient()) {
-
-      if (!client.tableOperations().exists(opts.getTableName())) {
+      try {
         client.tableOperations().create(opts.getTableName());
-      } else {
-        System.out.println("Table exists, not doing anything.");
+      } catch (TableExistsException e) {
+        System.out.println("Table exists, not doing anything. Delete table " + 
opts.getTableName()
+            + " and re-run");
         return;
       }
 
diff --git a/src/main/java/org/apache/accumulo/examples/shard/Reverse.java b/src/main/java/org/apache/accumulo/examples/shard/Reverse.java
index 2d93f4c..139d9cd 100644
--- a/src/main/java/org/apache/accumulo/examples/shard/Reverse.java
+++ b/src/main/java/org/apache/accumulo/examples/shard/Reverse.java
@@ -41,10 +41,10 @@ public class Reverse {
   static class Opts extends ClientOpts {
 
     @Parameter(names = "--shardTable", description = "name of the shard table")
-    String shardTable = "shard";
+    String shardTable;
 
     @Parameter(names = "--doc2Term", description = "name of the doc2Term 
table")
-    String doc2TermTable = "doc2Term";
+    String doc2TermTable;
   }
 
   public static void main(String[] args) throws Exception {
