http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index f7f8727..96a6a60 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Consistency;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
@@ -56,7 +55,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.RegionSplitter;
@@ -81,7 +79,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 
 /**
  * Test Bulk Load and MR on a distributed cluster.
@@ -151,8 +148,8 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
   public static class SlowMeCoproScanOperations extends BaseRegionObserver {
     static final AtomicLong sleepTime = new AtomicLong(2000);
     Random r = new Random();
-    AtomicLong countOfNext = new AtomicLong(0); 
-    AtomicLong countOfOpen = new AtomicLong(0); 
+    AtomicLong countOfNext = new AtomicLong(0);
+    AtomicLong countOfOpen = new AtomicLong(0);
     public SlowMeCoproScanOperations() {}
     @Override
    public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> e,
@@ -185,7 +182,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
         } catch (InterruptedException e1) {
           LOG.error(e1);
         }
-      } 
+      }
     }
   }
 
@@ -196,7 +193,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
    int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
     if (replicaCount == NUM_REPLICA_COUNT_DEFAULT) return;
 
-    TableName t = TableName.valueOf(getTablename());
+    TableName t = getTablename();
     Admin admin = util.getHBaseAdmin();
     HTableDescriptor desc = admin.getTableDescriptor(t);
     desc.addCoprocessor(SlowMeCoproScanOperations.class.getName());
@@ -227,12 +224,12 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
   }
 
   private void setupTable() throws IOException, InterruptedException {
-    if (util.getHBaseAdmin().tableExists(TableName.valueOf(getTablename()))) {
+    if (util.getHBaseAdmin().tableExists(getTablename())) {
       util.deleteTable(getTablename());
     }
 
     util.createTable(
-        Bytes.toBytes(getTablename()),
+        getTablename().getName(),
         new byte[][]{CHAIN_FAM, SORT_FAM, DATA_FAM},
         getSplits(16)
     );
@@ -240,7 +237,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
    int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
     if (replicaCount == NUM_REPLICA_COUNT_DEFAULT) return;
 
-    TableName t = TableName.valueOf(getTablename());
+    TableName t = getTablename();
     HBaseTestingUtility.setReplicas(util.getHBaseAdmin(), t, replicaCount);
   }
 
@@ -663,7 +660,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
     }
 
     TableMapReduceUtil.initTableMapperJob(
-        Bytes.toBytes(getTablename()),
+        getTablename().getName(),
         scan,
         LinkedListCheckingMapper.class,
         LinkKey.class,
@@ -731,8 +728,8 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
   }
 
   @Override
-  public String getTablename() {
-    return getConf().get(TABLE_NAME_KEY, TABLE_NAME);
+  public TableName getTablename() {
+    return TableName.valueOf(getConf().get(TABLE_NAME_KEY, TABLE_NAME));
   }
 
   @Override

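Note on the recurring pattern in this file: IntegrationTestBase#getTablename() now returns a
TableName rather than a String, so call sites shed their TableName.valueOf(...) and
Bytes.toBytes(...) wrappers. A minimal before/after sketch of the shape (config keys as in the
diff above; the surrounding test harness is assumed):

    // Before: every caller converted the String at the use site.
    public String getTablename() {
      return getConf().get(TABLE_NAME_KEY, TABLE_NAME);
    }
    // ... caller:
    util.getHBaseAdmin().tableExists(TableName.valueOf(getTablename()));

    // After: the conversion happens once, inside the accessor.
    public TableName getTablename() {
      return TableName.valueOf(getConf().get(TABLE_NAME_KEY, TABLE_NAME));
    }
    // ... caller:
    util.getHBaseAdmin().tableExists(getTablename());
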
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
index 5c7fb45..26de202 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
@@ -52,14 +52,14 @@ import org.junit.experimental.categories.Category;
  *
 * Then the test creates a snapshot from this table, and overrides the values in the original
 * table with values 'after_snapshot_value'. The test, then runs a mapreduce job over the snapshot
- * with a scan start row 'bbb' and stop row 'yyy'. The data is saved in a single reduce output 
+ * with a scan start row 'bbb' and stop row 'yyy'. The data is saved in a single reduce output
 * file, and
 * inspected later to verify that the MR job has seen all the values from the snapshot.
  *
  * <p> These parameters can be used to configure the job:
  * <br>"IntegrationTestTableSnapshotInputFormat.table" =&gt; the name of the 
table
  * <br>"IntegrationTestTableSnapshotInputFormat.snapshot" =&gt; the name of 
the snapshot
- * <br>"IntegrationTestTableSnapshotInputFormat.numRegions" =&gt; number of 
regions in the table 
+ * <br>"IntegrationTestTableSnapshotInputFormat.numRegions" =&gt; number of 
regions in the table
  * to be created (default, 32).
  * <br>"IntegrationTestTableSnapshotInputFormat.tableDir" =&gt; temporary 
directory to restore the
  * snapshot files
@@ -74,9 +74,9 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
   private static final String TABLE_NAME_KEY = "IntegrationTestTableSnapshotInputFormat.table";
   private static final String DEFAULT_TABLE_NAME = "IntegrationTestTableSnapshotInputFormat";
 
-  private static final String SNAPSHOT_NAME_KEY = 
+  private static final String SNAPSHOT_NAME_KEY =
       "IntegrationTestTableSnapshotInputFormat.snapshot";
-  private static final String NUM_REGIONS_KEY = 
+  private static final String NUM_REGIONS_KEY =
       "IntegrationTestTableSnapshotInputFormat.numRegions";
 
   private static final String MR_IMPLEMENTATION_KEY =
@@ -175,7 +175,7 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
   }
 
   @Override // CM is not intended to be run with this test
-  public String getTablename() {
+  public TableName getTablename() {
     return null;
   }
 

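The javadoc above enumerates the configuration keys this test reads. A hypothetical way to set
them before launching the job (key strings from the javadoc; the values are illustrative only):

    Configuration conf = HBaseConfiguration.create();
    conf.set("IntegrationTestTableSnapshotInputFormat.table", "snapshotTestTable");
    conf.set("IntegrationTestTableSnapshotInputFormat.snapshot", "snapshotTestSnapshot");
    conf.setInt("IntegrationTestTableSnapshotInputFormat.numRegions", 32);  // default 32
    conf.set("IntegrationTestTableSnapshotInputFormat.tableDir", "/tmp/snapshot_restore_dir");
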
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
index 31c67af..65e1026 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
@@ -190,7 +190,7 @@ public class IntegrationTestMTTR {
 
     // Set up the action that will move the regions of our table.
     moveRegionAction = new MoveRegionsOfTableAction(sleepTime,
-        MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, tableName.getNameAsString());
+        MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, tableName);
 
     // Kill the master
     restartMasterAction = new RestartActiveMasterAction(1000);
@@ -346,6 +346,7 @@ public class IntegrationTestMTTR {
       }
     }
 
+    @Override
     public String toString() {
       Objects.ToStringHelper helper = Objects.toStringHelper(this)
           .add("numResults", stats.getN())

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index d670a5e..3adef26 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -1153,9 +1153,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
   }
 
   @Override
-  public String getTablename() {
+  public TableName getTablename() {
     Configuration c = getConf();
-    return c.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME);
+    return TableName.valueOf(c.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
index 322dd81..0da5107 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
@@ -120,6 +120,7 @@ public class IntegrationTestLoadAndVerify  extends IntegrationTestBase  {
     REFERENCES_CHECKED
   }
 
+  @Override
   public void setUpCluster() throws Exception {
     util = getTestingUtil(getConf());
     util.initializeCluster(3);
@@ -421,6 +422,7 @@ public void cleanUpCluster() throws Exception {
     toRun = args[0];
   }
 
+  @Override
   public int runTestFromCommandLine() throws Exception {
     IntegrationTestingUtility.setUseDistributedCluster(getConf());
     boolean doLoad = false;
@@ -442,8 +444,8 @@ public void cleanUpCluster() throws Exception {
     }
 
     // create HTableDescriptor for specified table
-    String table = getTablename();
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
+    TableName table = getTablename();
+    HTableDescriptor htd = new HTableDescriptor(table);
     htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
 
     HBaseAdmin admin = new HBaseAdmin(getConf());
@@ -461,8 +463,8 @@ public void cleanUpCluster() throws Exception {
   }
 
   @Override
-  public String getTablename() {
-    return getConf().get(TABLE_NAME_KEY, TEST_NAME);
+  public TableName getTablename() {
+    return TableName.valueOf(getConf().get(TABLE_NAME_KEY, TEST_NAME));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
index 083fca2..5ca0e36 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
@@ -167,8 +166,8 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
       Threads.sleep(refreshTime);
     } else {
       LOG.info("Reopening the table");
-      admin.disableTable(TableName.valueOf(getTablename()));
-      admin.enableTable(TableName.valueOf(getTablename()));
+      admin.disableTable(getTablename());
+      admin.enableTable(getTablename());
     }
 
    // We should only start the ChaosMonkey after the readers are started and have cached

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
index 32cdfa2..f4e4250 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.IntegrationTests;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -65,19 +64,19 @@ import org.junit.experimental.categories.Category;
 * that) added with visibility expressions. In load step, 200 map tasks are launched, which in turn
 * write loadmapper.num_to_write (default 100K) rows to an hbase table. Rows are written in blocks,
  * for a total of 100 blocks.
- * 
+ *
 * Verify step scans the table as both users with Authorizations. This step asserts that user can
 * see only those rows (and so cells) with visibility for which they have label auth.
- * 
+ *
 * This class can be run as a unit test, as an integration test, or from the command line.
- * 
+ *
  * Originally taken from Apache Bigtop.
  * Issue user names as comma seperated list.
  *./hbase IntegrationTestWithCellVisibilityLoadAndVerify -u usera,userb
  */
 @Category(IntegrationTests.class)
public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationTestLoadAndVerify {
-  private static final String ERROR_STR = 
+  private static final String ERROR_STR =
       "Two user names are to be specified seperated by a ',' like 
'usera,userb'";
   private static final char NOT = '!';
   private static final char OR = '|';
@@ -257,10 +256,12 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
     return job;
   }
 
+  @Override
   protected void setMapperClass(Job job) {
     job.setMapperClass(LoadWithCellVisibilityMapper.class);
   }
 
+  @Override
+  protected void doVerify(final Configuration conf, final HTableDescriptor htd) throws Exception {
     System.out.println(String.format("Verifying for auths %s, %s, %s, %s", CONFIDENTIAL, TOPSECRET,
         SECRET, PRIVATE));
@@ -343,6 +344,7 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
     job.getConfiguration().setInt(TableRecordReaderImpl.LOG_PER_ROW_COUNT, (int) lpr);
   }
 
+  @Override
   public void usage() {
    System.err.println(this.getClass().getSimpleName() + " -u usera,userb [-Doptions]");
    System.err.println("  Loads a table with cell visibilities and verifies with Authorizations");
@@ -359,12 +361,12 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
         + "Number hbase scanner caching rows to read (default 50)");
   }
 
+  @Override
   public int runTestFromCommandLine() throws Exception {
     IntegrationTestingUtility.setUseDistributedCluster(getConf());
     int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);
     // create HTableDescriptor for specified table
-    String table = getTablename();
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
+    HTableDescriptor htd = new HTableDescriptor(getTablename());
     htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
 
     HBaseAdmin admin = new HBaseAdmin(getConf());

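Per the class javadoc and usage() above, the test takes two user names plus optional -D
overrides. An illustrative invocation (the property names appear in the javadoc and in
runTestFromCommandLine(); the values here are examples only):

    ./hbase IntegrationTestWithCellVisibilityLoadAndVerify -u usera,userb \
        -Dloadmapper.num_to_write=100000 -Dloadmapper.numPresplits=5
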
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
index 86ccff3..7961eb9 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.hbase.trace;
 
 import org.apache.commons.cli.CommandLine;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
@@ -28,7 +26,6 @@ import org.apache.hadoop.hbase.IntegrationTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -38,10 +35,8 @@ import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.ToolRunner;
 import org.htrace.Sampler;
-import org.htrace.Span;
 import org.htrace.Trace;
 import org.htrace.TraceScope;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -255,13 +250,13 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
           ht.put(p);
         }
         if ((x % 1000) == 0) {
-          admin.flush(tableName.toBytes());
+          admin.flush(tableName);
         }
       } finally {
         traceScope.close();
       }
     }
-    admin.flush(tableName.toBytes());
+    admin.flush(tableName);
     return rowKeys;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
index 66d23d9..a752f7a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -255,7 +254,7 @@ public class TestNamespace {
     p.add(Bytes.toBytes("my_cf"),Bytes.toBytes("my_col"),Bytes.toBytes("value1"));
     table.put(p);
     //flush and read from disk to make sure directory changes are working
-    admin.flush(desc.getTableName().getName());
+    admin.flush(desc.getTableName());
     Get g = new Get(Bytes.toBytes("row1"));
     assertTrue(table.exists(g));
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
index c24b4e1..664ae04 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
@@ -119,7 +119,7 @@ public class TestAdmin {
 
   @Test (timeout=300000)
   public void testSplitFlushCompactUnknownTable() throws InterruptedException {
-    final String unknowntable = "fubar";
+    final TableName unknowntable = TableName.valueOf("fubar");
     Exception exception = null;
     try {
       this.admin.compact(unknowntable);
@@ -1023,10 +1023,11 @@ public class TestAdmin {
     scanner.next();
 
     // Split the table
-    this.admin.split(tableName.getName(), splitPoint);
+    this.admin.split(tableName, splitPoint);
 
     final AtomicInteger count = new AtomicInteger(0);
     Thread t = new Thread("CheckForSplit") {
+      @Override
       public void run() {
         for (int i = 0; i < 45; i++) {
           try {
@@ -1636,7 +1637,7 @@ public class TestAdmin {
     // make sure log.hflush() calls syncFs() to open a pipeline
     TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     // lower the namenode & datanode heartbeat so the namenode
-    // quickly detects datanode failures  
+    // quickly detects datanode failures
     TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
     TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
     // the namenode might still try to choose the recently-dead datanode

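The TestAdmin hunks above show the Admin API split this commit tracks: table-scoped calls now
take a TableName, while the raw region-name overloads move to the *Region methods (see the
TestTableLockManager diff further down). A small sketch of the table-level shape (the admin
handle and splitPoint are assumed):

    TableName tn = TableName.valueOf("fubar");
    admin.compact(tn);            // was admin.compact(String/byte[])
    admin.split(tn, splitPoint);  // was admin.split(tableName.getName(), splitPoint)
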
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 34bd90b..0916e38 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -238,7 +238,7 @@ public class TestFromClientSide {
     */
    @Test
    public void testPurgeFutureDeletes() throws Exception {
-     final byte[] TABLENAME = Bytes.toBytes("testPurgeFutureDeletes");
+     final TableName TABLENAME = TableName.valueOf("testPurgeFutureDeletes");
      final byte[] ROW = Bytes.toBytes("row");
      final byte[] FAMILY = Bytes.toBytes("family");
      final byte[] COLUMN = Bytes.toBytes("column");

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
index d8baea4..77a8146 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
@@ -256,7 +256,7 @@ public class TestReplicaWithCluster {
    final HTable table = new HTable(HTU.getConfiguration(), hdt.getTableName());
     table.put(p);
 
-    HTU.getHBaseAdmin().flush(table.getTableName());
+    HTU.getHBaseAdmin().flush(table.getName());
     LOG.info("Put & flush done on the first cluster. Now doing a get on the 
same cluster.");
 
    Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate<Exception>() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
index b300dfa..d0da0b2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hbase.client;
 
-import java.io.IOException;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -303,7 +302,7 @@ public class TestSnapshotCloneIndependence {
       originalRegionCount, cloneTableRegionCount);
 
     // Split a region on the parent table
-    admin.split(originalTableHRegions.get(0).getRegionName());
+    admin.splitRegion(originalTableHRegions.get(0).getRegionName());
     waitOnSplit(original, originalRegionCount);
 
     // Verify that the cloned table region is not split

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
index 8d7c6d5..5732cc5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
@@ -105,7 +105,7 @@ public class TestTableSnapshotScanner {
     util.loadTable(table, FAMILIES, value);
 
     // cause flush to create new files in the region
-    admin.flush(tableName.toString());
+    admin.flush(tableName);
     table.close();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
index b7319b7..61c1721 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
@@ -240,7 +239,7 @@ public class TestRegionObserverInterface {
 
   @Test
   public void testCheckAndPutHooks() throws IOException {
-    TableName tableName = 
+    TableName tableName =
        TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndPutHooks");
     HTable table = util.createTable(tableName, new byte[][] {A, B, C});
     try {
@@ -251,14 +250,14 @@ public class TestRegionObserverInterface {
       p = new Put(Bytes.toBytes(0));
       p.add(A, A, A);
       verifyMethodResult(SimpleRegionObserver.class,
-          new String[] {"hadPreCheckAndPut", 
+          new String[] {"hadPreCheckAndPut",
               "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
           tableName,
           new Boolean[] {false, false, false}
           );
       table.checkAndPut(Bytes.toBytes(0), A, A, A, p);
       verifyMethodResult(SimpleRegionObserver.class,
-          new String[] {"hadPreCheckAndPut", 
+          new String[] {"hadPreCheckAndPut",
               "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
           tableName,
           new Boolean[] {true, true, true}
@@ -271,7 +270,7 @@ public class TestRegionObserverInterface {
 
   @Test
   public void testCheckAndDeleteHooks() throws IOException {
-    TableName tableName = 
+    TableName tableName =
        TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndDeleteHooks");
     HTable table = util.createTable(tableName, new byte[][] {A, B, C});
     try {
@@ -282,14 +281,14 @@ public class TestRegionObserverInterface {
       Delete d = new Delete(Bytes.toBytes(0));
       table.delete(d);
       verifyMethodResult(SimpleRegionObserver.class,
-          new String[] {"hadPreCheckAndDelete", 
+          new String[] {"hadPreCheckAndDelete",
               "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"},
           tableName,
           new Boolean[] {false, false, false}
           );
       table.checkAndDelete(Bytes.toBytes(0), A, A, A, d);
       verifyMethodResult(SimpleRegionObserver.class,
-          new String[] {"hadPreCheckAndDelete", 
+          new String[] {"hadPreCheckAndDelete",
               "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"},
           tableName,
           new Boolean[] {true, true, true}
@@ -516,7 +515,7 @@ public class TestRegionObserverInterface {
 
     // force a compaction
     long ts = System.currentTimeMillis();
-    admin.flush(compactTable.toBytes());
+    admin.flush(compactTable);
     // wait for flush
     for (int i=0; i<10; i++) {
       if (compactor.lastFlush >= ts) {
@@ -528,7 +527,7 @@ public class TestRegionObserverInterface {
     LOG.debug("Flush complete");
 
     ts = compactor.lastFlush;
-    admin.majorCompact(compactTable.toBytes());
+    admin.majorCompact(compactTable);
     // wait for compaction
     for (int i=0; i<30; i++) {
       if (compactor.lastCompaction >= ts) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
index 5c05169..00f7c49 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.IsolationLevel;
 import org.apache.hadoop.hbase.client.Put;
@@ -214,14 +213,14 @@ public class TestRegionObserverScannerOpenHook {
    */
   public static class CompactionCompletionNotifyingRegion extends HRegion {
     private static volatile CountDownLatch compactionStateChangeLatch = null;
-    
+
     @SuppressWarnings("deprecation")
     public CompactionCompletionNotifyingRegion(Path tableDir, HLog log,
         FileSystem fs, Configuration confParam, HRegionInfo info,
         HTableDescriptor htd, RegionServerServices rsServices) {
       super(tableDir, log, fs, confParam, info, htd, rsServices);
     }
-    
+
     public CountDownLatch getCompactionStateChangeLatch() {
      if (compactionStateChangeLatch == null) compactionStateChangeLatch = new CountDownLatch(1);
       return compactionStateChangeLatch;
@@ -231,9 +230,9 @@ public class TestRegionObserverScannerOpenHook {
       boolean ret = super.compact(compaction, store);
       if (ret) compactionStateChangeLatch.countDown();
       return ret;
-    }    
+    }
   }
-  
+
   /**
   * Unfortunately, the easiest way to test this is to spin up a mini-cluster since we want to do
   * the usual compaction mechanism on the region, rather than going through the backdoor to the
@@ -270,16 +269,16 @@ public class TestRegionObserverScannerOpenHook {
     List<HRegion> regions = rs.getOnlineRegions(desc.getTableName());
     assertEquals("More than 1 region serving test table with 1 row", 1, 
regions.size());
     HRegion region = regions.get(0);
-    admin.flush(region.getRegionName());
+    admin.flushRegion(region.getRegionName());
     CountDownLatch latch = ((CompactionCompletionNotifyingRegion)region)
         .getCompactionStateChangeLatch();
-    
+
     // put another row and flush that too
     put = new Put(Bytes.toBytes("anotherrow"));
     put.add(A, A, A);
     table.put(put);
     table.flushCommits();
-    admin.flush(region.getRegionName());
+    admin.flushRegion(region.getRegionName());
 
     // run a compaction, which normally would should get rid of the data
     // wait for the compaction checker to complete

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java
index c9be776..d19437e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java
@@ -109,7 +109,7 @@ public class TestPrefixTree {
       put.add(fam, qual2, Bytes.toBytes("c2-value-3"));
       table.put(put);
       table.flushCommits();
-      hBaseAdmin.flush(tableName.getNameAsString());
+      hBaseAdmin.flush(tableName);
       String[] rows = new String[3];
       rows[0] = row1;
       rows[1] = row2;

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
index 18b9864..7c55ad0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -167,7 +166,7 @@ public abstract class TableSnapshotInputFormatTestBase {
     util.loadTable(table, FAMILIES, value);
 
     // cause flush to create new files in the region
-    admin.flush(tableName.toString());
+    admin.flush(tableName);
     table.close();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java
index 39d1bab..5fb67e5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -44,7 +43,6 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import com.google.common.base.Joiner;
 import org.junit.experimental.categories.Category;
 
 @Category(MediumTests.class)
@@ -70,12 +68,14 @@ public class TestAssignmentListener {
     public DummyAssignmentListener() {
     }
 
+    @Override
    public void regionOpened(final HRegionInfo regionInfo, final ServerName serverName) {
      LOG.info("Assignment open region=" + regionInfo + " server=" + serverName);
       openCount.incrementAndGet();
       modified.incrementAndGet();
     }
 
+    @Override
     public void regionClosed(final HRegionInfo regionInfo) {
       LOG.info("Assignment close region=" + regionInfo);
       closeCount.incrementAndGet();
@@ -103,12 +103,14 @@ public class TestAssignmentListener {
     public DummyServerListener() {
     }
 
+    @Override
     public void serverAdded(final ServerName serverName) {
       LOG.info("Server added " + serverName);
       addedCount.incrementAndGet();
       modified.incrementAndGet();
     }
 
+    @Override
     public void serverRemoved(final ServerName serverName) {
       LOG.info("Server removed " + serverName);
       removedCount.incrementAndGet();
@@ -216,7 +218,7 @@ public class TestAssignmentListener {
       // Split the table in two
       LOG.info("Split Table");
       listener.reset();
-      admin.split(TABLE_NAME_STR, "row-3");
+      admin.split(TABLE_NAME, Bytes.toBytes("row-3"));
       listener.awaitModifications(3);
       assertEquals(2, listener.getLoadCount());     // daughters added
       assertEquals(1, listener.getCloseCount());    // parent removed
@@ -226,7 +228,7 @@ public class TestAssignmentListener {
       int mergeable = 0;
       while (mergeable < 2) {
         Thread.sleep(100);
-        admin.majorCompact(TABLE_NAME_STR);
+        admin.majorCompact(TABLE_NAME);
         mergeable = 0;
        for (JVMClusterUtil.RegionServerThread regionThread: miniCluster.getRegionServerThreads()) {
          for (HRegion region: regionThread.getRegionServer().getOnlineRegions(TABLE_NAME)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index 0802dea..1735382 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -1222,17 +1222,17 @@ public class TestDistributedLogSplitting {
    // use last 5 bytes because HBaseTestingUtility.createMultiRegions use 5 bytes key
     row = Arrays.copyOfRange(row, 3, 8);
     long value = 0;
-    byte[] tableName = Bytes.toBytes("table");
+    TableName tableName = TableName.valueOf("table");
     byte[] family = Bytes.toBytes("family");
     byte[] qualifier = Bytes.toBytes("c1");
     long timeStamp = System.currentTimeMillis();
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor htd = new HTableDescriptor();
     htd.addFamily(new HColumnDescriptor(family));
     for (int i = 0; i < NUM_LOG_LINES; i += 1) {
       WALEdit e = new WALEdit();
       value++;
      e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
-      hrs.getWAL().append(curRegionInfo, TableName.valueOf(tableName), e,
+      hrs.getWAL().append(curRegionInfo, tableName, e,
         System.currentTimeMillis(), htd, sequenceId);
     }
     hrs.getWAL().sync();
@@ -1314,17 +1314,17 @@ public class TestDistributedLogSplitting {
    // use last 5 bytes because HBaseTestingUtility.createMultiRegions use 5 bytes key
     row = Arrays.copyOfRange(row, 3, 8);
     long value = 0;
-    final byte[] tableName = Bytes.toBytes("table");
+    final TableName tableName = TableName.valueOf("table");
     byte[] family = Bytes.toBytes("family");
     byte[] qualifier = Bytes.toBytes("c1");
     long timeStamp = System.currentTimeMillis();
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor(family));
     for (int i = 0; i < NUM_LOG_LINES; i += 1) {
       WALEdit e = new WALEdit();
       value++;
      e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
-      hrs.getWAL().append(curRegionInfo, TableName.valueOf(tableName), e,
+      hrs.getWAL().append(curRegionInfo, tableName, e,
         System.currentTimeMillis(), htd, sequenceId);
     }
     hrs.getWAL().sync();

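The log-splitting hunks above move the table identifier itself to a TableName, so the
descriptor and the WAL append take it directly. A minimal sketch of the new shape (types as in
the diff; the WAL and region wiring are assumed):

    TableName tableName = TableName.valueOf("table");
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(Bytes.toBytes("family")));
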
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index 6bfb618..c122386 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -86,7 +86,7 @@ public class TestMaster {
 
     // Now trigger a split and stop when the split is in progress
     LOG.info("Splitting table");
-    TEST_UTIL.getHBaseAdmin().split(TABLENAME.getName());
+    TEST_UTIL.getHBaseAdmin().split(TABLENAME);
     LOG.info("Waiting for split result to be about to open");
     RegionStates regionStates = m.assignmentManager.getRegionStates();
     while (regionStates.getRegionsOfTable(TABLENAME).size() <= 1) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
index 3bb7bba..a04e4d0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -251,7 +250,7 @@ public class TestTableLockManager {
     //ensure that znode for the table node has been deleted
     final ZooKeeperWatcher zkWatcher = TEST_UTIL.getZooKeeperWatcher();
    final String znode = ZKUtil.joinZNode(zkWatcher.tableLockZNode, TABLE_NAME.getNameAsString());
-    
+
     TEST_UTIL.waitFor(5000, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
@@ -373,9 +372,9 @@ public class TestTableLockManager {
           HRegion region = TEST_UTIL.getSplittableRegion(tableName, -1);
           if (region != null) {
             byte[] regionName = region.getRegionName();
-            admin.flush(regionName);
-            admin.compact(regionName);
-            admin.split(regionName);
+            admin.flushRegion(regionName);
+            admin.compactRegion(regionName);
+            admin.splitRegion(regionName);
           } else {
             LOG.warn("Could not find suitable region for the table.  Possibly 
the " +
               "region got closed and the attempts got over before " +

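The rename is clearest in this hunk: the byte[] region-name overloads of flush/compact/split
become flushRegion/compactRegion/splitRegion, leaving the unqualified names to the TableName
overloads. Sketch, following the hunk above (region and admin handles assumed):

    byte[] regionName = region.getRegionName();
    admin.flushRegion(regionName);    // was admin.flush(regionName)
    admin.compactRegion(regionName);  // was admin.compact(regionName)
    admin.splitRegion(regionName);    // was admin.split(regionName)
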
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
index fe399a1..d0b2188 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
@@ -236,13 +236,13 @@ public class TestNamespaceUpgrade {
      FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath()
           , LOG);
       Assert.assertEquals(newTableName + "", currentKeys.length, count);
-      TEST_UTIL.getHBaseAdmin().flush(newTableName.toBytes());
-      TEST_UTIL.getHBaseAdmin().majorCompact(newTableName.toBytes());
+      TEST_UTIL.getHBaseAdmin().flush(newTableName);
+      TEST_UTIL.getHBaseAdmin().majorCompact(newTableName);
       TEST_UTIL.waitFor(30000, new Waiter.Predicate<IOException>() {
         @Override
         public boolean evaluate() throws IOException {
           try {
-            return TEST_UTIL.getHBaseAdmin().getCompactionState(newTableName.toBytes()) ==
+            return TEST_UTIL.getHBaseAdmin().getCompactionState(newTableName) ==
                 AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
           } catch (InterruptedException e) {
             throw new IOException(e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
index eabf75f..c4049dd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
@@ -117,7 +117,7 @@ public class TestEncryptionKeyRotation {
     Thread.sleep(5000); // Need a predicate for online schema change
 
     // And major compact
-    TEST_UTIL.getHBaseAdmin().majorCompact(htd.getName());
+    TEST_UTIL.getHBaseAdmin().majorCompact(htd.getTableName());
     TEST_UTIL.waitFor(30000, 1000, true, new Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
@@ -213,7 +213,7 @@ public class TestEncryptionKeyRotation {
     } finally {
       table.close();
     }
-    TEST_UTIL.getHBaseAdmin().flush(htd.getName());
+    TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());
   }
 
   private static byte[] extractHFileKey(Path path) throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
index 8dc7143..9e5a7b0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
@@ -108,7 +108,7 @@ public class TestEncryptionRandomKeying {
     } finally {
       table.close();
     }
-    TEST_UTIL.getHBaseAdmin().flush(htd.getName());
+    TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());
   }
 
   @AfterClass

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index 8de605d..5189fc8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -245,6 +245,7 @@ public class TestEndToEndSplitTransaction {
       rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
     }
 
+    @Override
     public void run() {
       try {
         Random random = new Random();
@@ -279,7 +280,7 @@ public class TestEndToEndSplitTransaction {
 
           log("Initiating region split for:" + region.getRegionNameAsString());
           try {
-            admin.split(region.getRegionName(), splitPoint);
+            admin.splitRegion(region.getRegionName(), splitPoint);
             //wait until the split is complete
             blockUntilRegionSplit(conf, 50000, region.getRegionName(), true);
 
@@ -412,7 +413,7 @@ public class TestEndToEndSplitTransaction {
  public static void flushAndBlockUntilDone(Admin admin, HRegionServer rs, byte[] regionName)
       throws IOException, InterruptedException {
     log("flushing region: " + Bytes.toStringBinary(regionName));
-    admin.flush(regionName);
+    admin.flushRegion(regionName);
     log("blocking until flush is complete: " + 
Bytes.toStringBinary(regionName));
     Threads.sleepWithoutInterrupt(500);
     while (rs.cacheFlusher.getFlushQueueSize() > 0) {
@@ -423,7 +424,7 @@ public class TestEndToEndSplitTransaction {
  public static void compactAndBlockUntilDone(Admin admin, HRegionServer rs, byte[] regionName)
       throws IOException, InterruptedException {
     log("Compacting region: " + Bytes.toStringBinary(regionName));
-    admin.majorCompact(regionName);
+    admin.majorCompactRegion(regionName);
     log("blocking until compaction is complete: " + 
Bytes.toStringBinary(regionName));
     Threads.sleepWithoutInterrupt(500);
     while (rs.compactSplitThread.getCompactionQueueSize() > 0) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index b4ae3c9..3270411 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -204,7 +204,7 @@ public class TestRegionMergeTransactionOnCluster {
       assertTrue(fs.exists(regionAdir));
       assertTrue(fs.exists(regionBdir));
 
-      admin.compact(mergedRegionInfo.getRegionName());
+      admin.compactRegion(mergedRegionInfo.getRegionName());
       // wait until merged region doesn't have reference file
       long timeout = System.currentTimeMillis() + waitTime;
       HRegionFileSystem hrfs = new HRegionFileSystem(

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index 99ea553..aa35486 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -208,7 +208,7 @@ public class TestRegionServerMetrics {
 
   @Test
   public void testStoreCount() throws Exception {
-    byte[] tableName = Bytes.toBytes("testStoreCount");
+    TableName tableName = TableName.valueOf("testStoreCount");
     byte[] cf = Bytes.toBytes("d");
     byte[] row = Bytes.toBytes("rk");
     byte[] qualifier = Bytes.toBytes("qual");

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
index d259933..e612585 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -123,8 +122,8 @@ public class TestTags {
       put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
       put.setAttribute("visibility", Bytes.toBytes("myTag"));
       table.put(put);
-      admin.flush(tableName.getName());
-      List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
+      admin.flush(tableName);
+      List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
       for (HRegion region : regions) {
         Store store = region.getStore(fam);
         while (!(store.getStorefilesCount() > 0)) {
@@ -137,8 +136,8 @@ public class TestTags {
       put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
       // put1.setAttribute("visibility", Bytes.toBytes("myTag3"));
       table.put(put1);
-      admin.flush(tableName.getName());
-      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
+      admin.flush(tableName);
+      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
       for (HRegion region : regions) {
         Store store = region.getStore(fam);
         while (!(store.getStorefilesCount() > 1)) {
@@ -152,7 +151,7 @@ public class TestTags {
       put2.setAttribute("visibility", Bytes.toBytes("myTag3"));
       table.put(put2);
 
-      admin.flush(tableName.getName());
+      admin.flush(tableName);
       regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
       for (HRegion region : regions) {
         Store store = region.getStore(fam);
@@ -161,8 +160,8 @@ public class TestTags {
         }
       }
       result(fam, row, qual, row2, table, value, value2, row1, value1);
-      admin.compact(tableName.getName());
-      while (admin.getCompactionState(tableName.getName()) != CompactionState.NONE) {
+      admin.compact(tableName);
+      while (admin.getCompactionState(tableName) != CompactionState.NONE) {
         Thread.sleep(10);
       }
       result(fam, row, qual, row2, table, value, value2, row1, value1);
@@ -201,7 +200,7 @@ public class TestTags {
       byte[] value = Bytes.toBytes("value");
       put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
       table.put(put);
-      admin.flush(tableName.getName());
+      admin.flush(tableName);
       List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
       for (HRegion region : regions) {
         Store store = region.getStore(fam);
@@ -214,7 +213,7 @@ public class TestTags {
       byte[] value1 = Bytes.toBytes("1000dfsdf");
       put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
       table.put(put1);
-      admin.flush(tableName.getName());
+      admin.flush(tableName);
       regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
       for (HRegion region : regions) {
         Store store = region.getStore(fam);
@@ -228,8 +227,8 @@ public class TestTags {
       put2.add(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
       table.put(put2);
 
-      admin.flush(tableName.getName());
-      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
+      admin.flush(tableName);
+      regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
       for (HRegion region : regions) {
         Store store = region.getStore(fam);
         while (!(store.getStorefilesCount() > 2)) {
@@ -250,8 +249,8 @@ public class TestTags {
         if (scanner != null)
           scanner.close();
       }
-      admin.compact(tableName.getName());
-      while (admin.getCompactionState(tableName.getName()) != CompactionState.NONE) {
+      admin.compact(tableName);
+      while (admin.getCompactionState(tableName) != CompactionState.NONE) {
         Thread.sleep(10);
       }
       s = new Scan(row);
@@ -310,7 +309,7 @@ public class TestTags {
         byte[] value1 = Bytes.toBytes("1000dfsdf");
         put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
         table.put(put1);
-        admin.flush(tableName.getName());
+        admin.flush(tableName);
         List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
         for (HRegion region : regions) {
           Store store = region.getStore(fam);
@@ -323,8 +322,8 @@ public class TestTags {
         value1 = Bytes.toBytes("1000dfsdf");
         put1.add(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
         table.put(put1);
-        admin.flush(tableName.getName());
-        regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
+        admin.flush(tableName);
+        regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
         for (HRegion region : regions) {
           Store store = region.getStore(fam);
           while (!(store.getStorefilesCount() > 1)) {
@@ -340,8 +339,8 @@ public class TestTags {
         put2.add(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
         put.setAttribute("visibility", Bytes.toBytes("ram"));
         table.put(put2);
-        admin.flush(tableName.getName());
-        regions = TEST_UTIL.getHBaseCluster().getRegions(tableName.getName());
+        admin.flush(tableName);
+        regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
         for (HRegion region : regions) {
           Store store = region.getStore(fam);
           while (!(store.getStorefilesCount() > 2)) {
@@ -372,7 +371,7 @@ public class TestTags {
           }
           TestCoprocessorForTags.checkTagPresence = false;
         }
-        while (admin.getCompactionState(tableName.getName()) != CompactionState.NONE) {
+        while (admin.getCompactionState(tableName) != CompactionState.NONE) {
           Thread.sleep(10);
         }
         TestCoprocessorForTags.checkTagPresence = true;

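The TestTags hunks are one substitution repeated: Admin.flush, Admin.compact and Admin.getCompactionState accept the TableName directly, so the tableName.getName() detour through byte[] drops out. A minimal sketch of the resulting flush/compact/wait idiom; the helper name is illustrative and the CompactionState import is assumed to match the one TestTags itself uses:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    // Assumed to match the import in TestTags:
    import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;

    class FlushCompactSketch {
      // Illustrative helper, not part of the patch.
      static void flushCompactAndWait(Admin admin, TableName tableName) throws Exception {
        admin.flush(tableName);   // was: admin.flush(tableName.getName())
        admin.compact(tableName); // was: admin.compact(tableName.getName())
        while (admin.getCompactionState(tableName) != CompactionState.NONE) {
          Thread.sleep(10);       // poll until the compaction settles, as the test does
        }
      }
    }
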
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index a47c826..ddac2b7 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -324,7 +323,7 @@ public class TestLogRolling  {
     TEST_UTIL.ensureSomeRegionServersAvailable(2);
     assertTrue("This test requires HLog file replication set to 2.",
       fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) == 2);
-    LOG.info("Replication=" + 
+    LOG.info("Replication=" +
       fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
 
     this.server = cluster.getRegionServer(0);
@@ -363,9 +362,9 @@ public class TestLogRolling  {
     }
 
     assertTrue("DataNodes " + dfsCluster.getDataNodes().size() +
-        " default replication " + 
+        " default replication " +
         fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()),
-    dfsCluster.getDataNodes().size() >= 
+    dfsCluster.getDataNodes().size() >=
       fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) + 1);
 
     writeData(table, 2);
@@ -378,7 +377,7 @@ public class TestLogRolling  {
     assertTrue("The log shouldn't have rolled yet",
       oldFilenum == ((FSHLog) log).getFilenum());
     final DatanodeInfo[] pipeline = getPipeline(log);
-    assertTrue(pipeline.length == 
+    assertTrue(pipeline.length ==
         fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
 
     // kill a datanode in the pipeline to force a log roll on the next sync()
@@ -414,7 +413,7 @@ public class TestLogRolling  {
     batchWriteAndWait(table, 13, true, 10000);
     assertTrue("New log file should have the default replication instead of " +
       ((FSHLog) log).getLogReplication(),
-      ((FSHLog) log).getLogReplication() == 
+      ((FSHLog) log).getLogReplication() ==
         fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
     assertTrue("LowReplication Roller should've been enabled",
         log.isLowReplicationRollEnabled());
@@ -430,7 +429,7 @@ public class TestLogRolling  {
     LOG.info("Starting testLogRollOnPipelineRestart");
     assertTrue("This test requires HLog file replication.",
       fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) > 1);
-    LOG.info("Replication=" + 
+    LOG.info("Replication=" +
       fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
     // When the hbase:meta table can be opened, the region servers are running
     HTable t = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
@@ -610,12 +609,12 @@ public class TestLogRolling  {
       Store s = region.getStore(HConstants.CATALOG_FAMILY);
 
       //have to flush namespace to ensure it doesn't affect wall tests
-      admin.flush(TableName.NAMESPACE_TABLE_NAME.getName());
+      admin.flush(TableName.NAMESPACE_TABLE_NAME);
 
       // Put some stuff into table2, to make sure we have some files to compact.
       for (int i = 1; i <= 2; ++i) {
         doPut(table2, i);
-        admin.flush(table2.getTableName());
+        admin.flush(table2.getName());
       }
       doPut(table2, 3); // don't flush yet, or compaction might trigger before we roll WAL
       assertEquals("Should have no WAL after initial writes", 0, 
fshLog.getNumRolledLogFiles());
@@ -624,7 +623,7 @@ public class TestLogRolling  {
       // Roll the log and compact table2, to have compaction record in the 2nd WAL.
       fshLog.rollWriter();
       assertEquals("Should have WAL; one table is not flushed", 1, 
fshLog.getNumRolledLogFiles());
-      admin.flush(table2.getTableName());
+      admin.flush(table2.getName());
       region.compactStores();
       // Wait for compaction in case if flush triggered it before us.
       Assert.assertNotNull(s);
@@ -639,7 +638,7 @@ public class TestLogRolling  {
       assertEquals("Should have WAL; one table is not flushed", 1, 
fshLog.getNumRolledLogFiles());
 
       // Flush table to make latest WAL obsolete; write another record, and roll again.
-      admin.flush(table.getTableName());
+      admin.flush(table.getName());
       doPut(table, 1);
       fshLog.rollWriter(); // Now 2nd WAL is deleted and 3rd is added.
       assertEquals("Should have 1 WALs at the end", 1, 
fshLog.getNumRolledLogFiles());

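Two HTable accessors meet in the TestLogRolling hunks: getTableName() returns the raw byte[] form, while getName() returns the TableName that the reworked Admin.flush overload expects, which is why the patch swaps one for the other. A minimal sketch of the distinction, with an illustrative helper name:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    class FlushByNameSketch {
      // Illustrative helper, not part of the patch.
      static void flushByTypedName(Admin admin, HTable table) throws Exception {
        byte[] raw = table.getTableName();        // legacy accessor: raw bytes
        TableName name = table.getName();         // typed accessor the patch prefers
        assert Bytes.equals(raw, name.getName()); // same bytes, stronger type
        admin.flush(name);                        // matches the TableName overload
      }
    }
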
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
index 25b62b8..719f7c2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
@@ -32,7 +32,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Durability;
@@ -63,7 +62,7 @@ public class TestTableResource {
   private static Map<HRegionInfo, ServerName> regionMap;
 
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static final HBaseRESTTestingUtility REST_TEST_UTIL = 
+  private static final HBaseRESTTestingUtility REST_TEST_UTIL =
     new HBaseRESTTestingUtility();
   private static Client client;
   private static JAXBContext context;
@@ -72,7 +71,7 @@ public class TestTableResource {
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.startMiniCluster(3);
     REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
-    client = new Client(new Cluster().add("localhost", 
+    client = new Client(new Cluster().add("localhost",
       REST_TEST_UTIL.getServletPort()));
     context = JAXBContext.newInstance(
         TableModel.class,
@@ -107,7 +106,7 @@ public class TestTableResource {
     Map<HRegionInfo, ServerName> m = table.getRegionLocations();
     assertEquals(m.size(), 1);
     // tell the master to split the table
-    admin.split(TABLE.toBytes());
+    admin.split(TABLE);
     // give some time for the split to happen
 
     long timeout = System.currentTimeMillis() + (15 * 1000);

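admin.split gets the same treatment: the REST test previously unwrapped the name with TABLE.toBytes() and now passes the TableName as-is, keeping its own 15-second polling loop for the split to show up. A minimal sketch; the helper and the plain sleep standing in for the test's region-map check are illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    class SplitSketch {
      // Illustrative helper, not part of the patch.
      static void splitAndWait(Admin admin, TableName table) throws Exception {
        admin.split(table); // was: admin.split(table.toBytes())
        long deadline = System.currentTimeMillis() + 15 * 1000;
        while (System.currentTimeMillis() < deadline) {
          Thread.sleep(250); // the test inspects its region map here instead
        }
      }
    }
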
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbe0522/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
index b81bf4f..c43689a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.LargeTests;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.security.User;
@@ -133,7 +132,7 @@ public class TestTablePermissions {
   }
 
   /**
-   * Writes a set of permissions as {@link org.apache.hadoop.io.Writable} instances      
+   * Writes a set of permissions as {@link org.apache.hadoop.io.Writable} instances
   * and returns the resulting byte array.  Used to verify we can read stuff written
    * with Writable.
    */
@@ -157,7 +156,7 @@ public class TestTablePermissions {
    * @param conf
    * @throws IOException
   */
-  public static void writePermissions(DataOutput out,        
+  public static void writePermissions(DataOutput out,
       ListMultimap<String,? extends Permission> perms, Configuration conf)
   throws IOException {
     Set<String> keys = perms.keySet();
@@ -294,7 +293,7 @@ public class TestTablePermissions {
     table.put(new Put(Bytes.toBytes("row2"))
         .add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v2")));
     Admin admin = UTIL.getHBaseAdmin();
-    admin.split(TEST_TABLE.getName());
+    admin.split(TEST_TABLE);
 
     // wait for split
     Thread.sleep(10000);

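TestTablePermissions closes the patch out with the same one-liner, TEST_TABLE.getName() giving way to the TableName itself. The typed overload also reads better at the call site, since a bare byte[] says nothing about what it names; the snippet below is illustrative, with a hypothetical table name:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    class SplitByTableNameSketch {
      // Illustrative: the argument type now documents itself.
      static void splitTable(Admin admin) throws Exception {
        TableName table = TableName.valueOf("testtable"); // hypothetical name
        admin.split(table); // unambiguously a table name, checked at compile time
      }
    }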