HBASE-11995 Use Connection and ConnectionFactory where possible (Solomon Duskis)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/669bc49b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/669bc49b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/669bc49b

Branch: refs/heads/branch-1
Commit: 669bc49b47da323a5703406dca90f6dce7acebbd
Parents: e5840e9
Author: Enis Soztutar <e...@apache.org>
Authored: Fri Sep 26 11:49:37 2014 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Fri Sep 26 12:00:24 2014 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/MetaTableAccessor.java  | 230 +++++++++----------
 .../hadoop/hbase/client/ConnectionAdapter.java  |   2 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   2 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |   4 +-
 .../apache/hadoop/hbase/client/Registry.java    |   4 +-
 .../hadoop/hbase/client/ZooKeeperRegistry.java  |   4 +-
 .../hbase/client/TestClientNoCluster.java       |  10 +-
 .../SnapshotOfRegionAssignmentFromMeta.java     |  14 +-
 .../balancer/FavoredNodeAssignmentHelper.java   |   8 +-
 .../master/snapshot/RestoreSnapshotHandler.java |   4 +-
 .../regionserver/ReplicationSink.java           |   8 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java   |   8 +-
 .../hadoop/hbase/util/MultiHConnection.java     |   3 +-
 .../hadoop/hbase/HBaseTestingUtility.java       |   5 +-
 .../hadoop/hbase/PerformanceEvaluation.java     |  42 ++--
 .../hadoop/hbase/TestMetaTableAccessor.java     |  61 ++---
 .../hadoop/hbase/client/TestClientTimeouts.java |   4 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   2 +-
 .../org/apache/hadoop/hbase/client/TestHCM.java |  42 ++--
 .../TestMasterOperationsForRegionReplicas.java  |  15 +-
 .../security/access/TestAccessController.java   |   6 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java |   4 +-
 .../hadoop/hbase/util/TestMergeTable.java       |   9 +-
 23 files changed, 247 insertions(+), 244 deletions(-)
----------------------------------------------------------------------
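
For orientation, a minimal sketch of the client pattern this change moves callers toward. This is illustrative only, not part of the commit; it assumes an hbase-site.xml on the classpath and an existing table named "test" (both assumptions).

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ConnectionFactoryExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Before: HConnection conn = HConnectionManager.createConnection(conf);
        // After: an interface-typed Connection obtained from ConnectionFactory.
        Connection connection = ConnectionFactory.createConnection(conf);
        try {
          Admin admin = connection.getAdmin();
          try {
            System.out.println("test exists? " + admin.tableExists(TableName.valueOf("test")));
          } finally {
            admin.close();
          }
          // Connection.getTable takes a TableName; the caller closes the Table.
          Table table = connection.getTable(TableName.valueOf("test"));
          try {
            Result r = table.get(new Get(Bytes.toBytes("row1")));
            System.out.println("row1 empty? " + r.isEmpty());
          } finally {
            table.close();
          }
        } finally {
          connection.close();
        }
      }
    }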


http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index d7c2233..d3dfa99 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -24,9 +24,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
@@ -135,65 +135,65 @@ public class MetaTableAccessor {
    * @return List of {@link org.apache.hadoop.hbase.client.Result}
    * @throws IOException
    */
-  public static List<Result> fullScanOfMeta(HConnection hConnection)
+  public static List<Result> fullScanOfMeta(Connection connection)
   throws IOException {
     CollectAllVisitor v = new CollectAllVisitor();
-    fullScan(hConnection, v, null);
+    fullScan(connection, v, null);
     return v.getResults();
   }
 
   /**
    * Performs a full scan of <code>hbase:meta</code>.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param visitor Visitor invoked against each row.
    * @throws IOException
    */
-  public static void fullScan(HConnection hConnection,
+  public static void fullScan(Connection connection,
       final Visitor visitor)
   throws IOException {
-    fullScan(hConnection, visitor, null);
+    fullScan(connection, visitor, null);
   }
 
   /**
    * Performs a full scan of <code>hbase:meta</code>.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @return List of {@link Result}
    * @throws IOException
    */
-  public static List<Result> fullScan(HConnection hConnection)
+  public static List<Result> fullScan(Connection connection)
     throws IOException {
     CollectAllVisitor v = new CollectAllVisitor();
-    fullScan(hConnection, v, null);
+    fullScan(connection, v, null);
     return v.getResults();
   }
 
   /**
    * Callers should call close on the returned {@link HTable} instance.
-   * @param hConnection connection we're using to access table
+   * @param connection connection we're using to access table
    * @param tableName Table to get an {@link 
org.apache.hadoop.hbase.client.HTable} against.
    * @return An {@link org.apache.hadoop.hbase.client.HTable} for 
<code>tableName</code>
    * @throws IOException
    * @SuppressWarnings("deprecation")
    */
-  private static Table getHTable(final HConnection hConnection,
+  private static Table getHTable(final Connection connection,
       final TableName tableName)
   throws IOException {
-    // We used to pass whole CatalogTracker in here, now we just pass in 
HConnection
-    if (hConnection == null || hConnection.isClosed()) {
+    // We used to pass whole CatalogTracker in here, now we just pass in 
Connection
+    if (connection == null || connection.isClosed()) {
       throw new NullPointerException("No connection");
     }
-    return new HTable(tableName, hConnection);
+    return new HTable(tableName, connection);
   }
 
   /**
    * Callers should call close on the returned {@link HTable} instance.
-   * @param hConnection connection we're using to access Meta
+   * @param connection connection we're using to access Meta
    * @return An {@link HTable} for <code>hbase:meta</code>
    * @throws IOException
    */
-  static Table getMetaHTable(final HConnection hConnection)
+  static Table getMetaHTable(final Connection connection)
   throws IOException {
-    return getHTable(hConnection, TableName.META_TABLE_NAME);
+    return getHTable(connection, TableName.META_TABLE_NAME);
   }
 
   /**
@@ -211,17 +211,17 @@ public class MetaTableAccessor {
 
   /**
    * Gets the region info and assignment for the specified region.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionName Region to lookup.
    * @return Location and HRegionInfo for <code>regionName</code>
    * @throws IOException
-   * @deprecated use {@link #getRegionLocation(HConnection, byte[])} instead
+   * @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
    */
   @Deprecated
   public static Pair<HRegionInfo, ServerName> getRegion(
-    HConnection hConnection, byte [] regionName)
+    Connection connection, byte [] regionName)
     throws IOException {
-    HRegionLocation location = getRegionLocation(hConnection, regionName);
+    HRegionLocation location = getRegionLocation(connection, regionName);
     return location == null
       ? null
       : new Pair<HRegionInfo, ServerName>(location.getRegionInfo(), 
location.getServerName());
@@ -229,12 +229,12 @@ public class MetaTableAccessor {
 
   /**
    * Returns the HRegionLocation from meta for the given region
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionName region we're looking for
    * @return HRegionLocation for the given region
    * @throws IOException
    */
-  public static HRegionLocation getRegionLocation(HConnection hConnection,
+  public static HRegionLocation getRegionLocation(Connection connection,
                                                   byte[] regionName) throws 
IOException {
     byte[] row = regionName;
     HRegionInfo parsedInfo = null;
@@ -246,7 +246,7 @@ public class MetaTableAccessor {
     }
     Get get = new Get(row);
     get.addFamily(HConstants.CATALOG_FAMILY);
-    Result r = get(getMetaHTable(hConnection), get);
+    Result r = get(getMetaHTable(connection), get);
     RegionLocations locations = getRegionLocations(r);
     return locations == null
       ? null
@@ -255,17 +255,17 @@ public class MetaTableAccessor {
 
   /**
    * Returns the HRegionLocation from meta for the given region
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfo region information
    * @return HRegionLocation for the given region
    * @throws IOException
    */
-  public static HRegionLocation getRegionLocation(HConnection hConnection,
+  public static HRegionLocation getRegionLocation(Connection connection,
                                                   HRegionInfo regionInfo) 
throws IOException {
     byte[] row = getMetaKeyForRegion(regionInfo);
     Get get = new Get(row);
     get.addFamily(HConstants.CATALOG_FAMILY);
-    Result r = get(getMetaHTable(hConnection), get);
+    Result r = get(getMetaHTable(connection), get);
     return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
   }
 
@@ -289,16 +289,16 @@ public class MetaTableAccessor {
 
   /**
    * Gets the result in hbase:meta for the specified region.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionName region we're looking for
    * @return result of the specified region
    * @throws IOException
    */
-  public static Result getRegionResult(HConnection hConnection,
+  public static Result getRegionResult(Connection connection,
       byte[] regionName) throws IOException {
     Get get = new Get(regionName);
     get.addFamily(HConstants.CATALOG_FAMILY);
-    return get(getMetaHTable(hConnection), get);
+    return get(getMetaHTable(connection), get);
   }
 
   /**
@@ -307,8 +307,8 @@ public class MetaTableAccessor {
    * @throws IOException
    */
   public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
-      HConnection hConnection, byte[] regionName) throws IOException {
-    Result result = getRegionResult(hConnection, regionName);
+      Connection connection, byte[] regionName) throws IOException {
+    Result result = getRegionResult(connection, regionName);
     HRegionInfo mergeA = getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
     HRegionInfo mergeB = getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
     if (mergeA == null && mergeB == null) {
@@ -320,12 +320,12 @@ public class MetaTableAccessor {
   /**
    * Checks if the specified table exists.  Looks at the hbase:meta table 
hosted on
    * the specified server.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param tableName table to check
    * @return true if the table exists in meta, false if not
    * @throws IOException
    */
-  public static boolean tableExists(HConnection hConnection,
+  public static boolean tableExists(Connection connection,
       final TableName tableName)
   throws IOException {
     if (tableName.equals(TableName.META_TABLE_NAME)) {
@@ -361,7 +361,7 @@ public class MetaTableAccessor {
         this.results.add(this.current);
       }
     };
-    fullScan(hConnection, visitor, getTableStartRowForMeta(tableName));
+    fullScan(connection, visitor, getTableStartRowForMeta(tableName));
     // If visitor has results >= 1 then table exists.
     return visitor.getResults().size() >= 1;
   }
@@ -369,21 +369,21 @@ public class MetaTableAccessor {
   /**
    * Gets all of the regions of the specified table.
    * @param zkw zookeeper connection to access meta table
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param tableName table we're looking for
    * @return Ordered list of {@link HRegionInfo}.
    * @throws IOException
    */
   public static List<HRegionInfo> getTableRegions(ZooKeeperWatcher zkw,
-      HConnection hConnection, TableName tableName)
+      Connection connection, TableName tableName)
   throws IOException {
-    return getTableRegions(zkw, hConnection, tableName, false);
+    return getTableRegions(zkw, connection, tableName, false);
   }
 
   /**
    * Gets all of the regions of the specified table.
    * @param zkw zookeeper connection to access meta table
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param tableName table we're looking for
    * @param excludeOfflinedSplitParents If true, do not include offlined split
    * parents in the return.
@@ -391,11 +391,11 @@ public class MetaTableAccessor {
    * @throws IOException
    */
   public static List<HRegionInfo> getTableRegions(ZooKeeperWatcher zkw,
-      HConnection hConnection, TableName tableName, final boolean 
excludeOfflinedSplitParents)
+      Connection connection, TableName tableName, final boolean 
excludeOfflinedSplitParents)
         throws IOException {
     List<Pair<HRegionInfo, ServerName>> result = null;
     try {
-      result = getTableRegionsAndLocations(zkw, hConnection, tableName,
+      result = getTableRegionsAndLocations(zkw, connection, tableName,
         excludeOfflinedSplitParents);
     } catch (InterruptedException e) {
       throw (InterruptedIOException)new InterruptedIOException().initCause(e);
@@ -458,7 +458,7 @@ public class MetaTableAccessor {
 
   /**
    * @param zkw zookeeper connection to access meta table
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param tableName table we're looking for
    * @return Return list of regioninfos and server.
    * @throws IOException
@@ -466,21 +466,21 @@ public class MetaTableAccessor {
    */
   public static List<Pair<HRegionInfo, ServerName>>
   getTableRegionsAndLocations(ZooKeeperWatcher zkw,
-                              HConnection hConnection, TableName tableName)
+                              Connection connection, TableName tableName)
   throws IOException, InterruptedException {
-    return getTableRegionsAndLocations(zkw, hConnection, tableName, true);
+    return getTableRegionsAndLocations(zkw, connection, tableName, true);
   }
 
   /**
    * @param zkw ZooKeeperWatcher instance we're using to get hbase:meta 
location
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param tableName table to work with
    * @return Return list of regioninfos and server addresses.
    * @throws IOException
    * @throws InterruptedException
    */
   public static List<Pair<HRegionInfo, ServerName>> 
getTableRegionsAndLocations(
-      ZooKeeperWatcher zkw, HConnection hConnection, final TableName tableName,
+      ZooKeeperWatcher zkw, Connection connection, final TableName tableName,
       final boolean excludeOfflinedSplitParents) throws IOException, 
InterruptedException {
 
     if (tableName.equals(TableName.META_TABLE_NAME)) {
@@ -524,19 +524,19 @@ public class MetaTableAccessor {
           }
         }
       };
-    fullScan(hConnection, visitor, getTableStartRowForMeta(tableName));
+    fullScan(connection, visitor, getTableStartRowForMeta(tableName));
     return visitor.getResults();
   }
 
   /**
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param serverName server whose regions we're interested in
    * @return List of user regions installed on this server (does not include
    * catalog regions).
    * @throws IOException
    */
   public static NavigableMap<HRegionInfo, Result>
-  getServerUserRegions(HConnection hConnection, final ServerName serverName)
+  getServerUserRegions(Connection connection, final ServerName serverName)
     throws IOException {
     final NavigableMap<HRegionInfo, Result> hris = new TreeMap<HRegionInfo, 
Result>();
     // Fill the above hris map with entries from hbase:meta that have the 
passed
@@ -556,11 +556,11 @@ public class MetaTableAccessor {
         }
       }
     };
-    fullScan(hConnection, v);
+    fullScan(connection, v);
     return hris;
   }
 
-  public static void fullScanMetaAndPrint(HConnection hConnection)
+  public static void fullScanMetaAndPrint(Connection connection)
     throws IOException {
     Visitor v = new Visitor() {
       @Override
@@ -577,30 +577,30 @@ public class MetaTableAccessor {
         return true;
       }
     };
-    fullScan(hConnection, v);
+    fullScan(connection, v);
   }
 
   /**
    * Performs a full scan of a catalog table.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param visitor Visitor invoked against each row.
    * @param startrow Where to start the scan. Pass null if want to begin scan
    * at first row.
    * <code>hbase:meta</code>, the default (pass false to scan hbase:meta)
    * @throws IOException
    */
-  public static void fullScan(HConnection hConnection,
+  public static void fullScan(Connection connection,
     final Visitor visitor, final byte [] startrow)
   throws IOException {
     Scan scan = new Scan();
     if (startrow != null) scan.setStartRow(startrow);
     if (startrow == null) {
-      int caching = hConnection.getConfiguration()
+      int caching = connection.getConfiguration()
           .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
       scan.setCaching(caching);
     }
     scan.addFamily(HConstants.CATALOG_FAMILY);
-    Table metaTable = getMetaHTable(hConnection);
+    Table metaTable = getMetaHTable(connection);
     ResultScanner scanner = null;
     try {
       scanner = metaTable.getScanner(scan);
@@ -935,13 +935,13 @@ public class MetaTableAccessor {
 
   /**
    * Put the passed <code>p</code> to the <code>hbase:meta</code> table.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param p Put to add to hbase:meta
    * @throws IOException
    */
-  static void putToMetaTable(final HConnection hConnection, final Put p)
+  static void putToMetaTable(final Connection connection, final Put p)
     throws IOException {
-    put(getMetaHTable(hConnection), p);
+    put(getMetaHTable(connection), p);
   }
 
   /**
@@ -959,13 +959,13 @@ public class MetaTableAccessor {
 
   /**
    * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param ps Put to add to hbase:meta
    * @throws IOException
    */
-  public static void putsToMetaTable(final HConnection hConnection, final 
List<Put> ps)
+  public static void putsToMetaTable(final Connection connection, final 
List<Put> ps)
     throws IOException {
-    Table t = getMetaHTable(hConnection);
+    Table t = getMetaHTable(connection);
     try {
       t.put(ps);
     } finally {
@@ -975,26 +975,26 @@ public class MetaTableAccessor {
 
   /**
    * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param d Delete to add to hbase:meta
    * @throws IOException
    */
-  static void deleteFromMetaTable(final HConnection hConnection, final Delete 
d)
+  static void deleteFromMetaTable(final Connection connection, final Delete d)
     throws IOException {
     List<Delete> dels = new ArrayList<Delete>(1);
     dels.add(d);
-    deleteFromMetaTable(hConnection, dels);
+    deleteFromMetaTable(connection, dels);
   }
 
   /**
    * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> 
table.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param deletes Deletes to add to hbase:meta  This list should support 
#remove.
    * @throws IOException
    */
-  public static void deleteFromMetaTable(final HConnection hConnection, final 
List<Delete> deletes)
+  public static void deleteFromMetaTable(final Connection connection, final 
List<Delete> deletes)
     throws IOException {
-    Table t = getMetaHTable(hConnection);
+    Table t = getMetaHTable(connection);
     try {
       t.delete(deletes);
     } finally {
@@ -1007,11 +1007,11 @@ public class MetaTableAccessor {
    * @param metaRows rows in hbase:meta
    * @param replicaIndexToDeleteFrom the replica ID we would start deleting 
from
    * @param numReplicasToRemove how many replicas to remove
-   * @param hConnection connection we're using to access meta table
+   * @param connection connection we're using to access meta table
    * @throws IOException
    */
   public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
-    int replicaIndexToDeleteFrom, int numReplicasToRemove, HConnection 
hConnection)
+    int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection 
connection)
       throws IOException {
     int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
     for (byte[] row : metaRows) {
@@ -1024,20 +1024,20 @@ public class MetaTableAccessor {
         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
           getStartCodeColumn(i));
       }
-      deleteFromMetaTable(hConnection, deleteReplicaLocations);
+      deleteFromMetaTable(connection, deleteReplicaLocations);
     }
   }
 
   /**
    * Execute the passed <code>mutations</code> against <code>hbase:meta</code> 
table.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param mutations Puts and Deletes to execute on hbase:meta
    * @throws IOException
    */
-  public static void mutateMetaTable(final HConnection hConnection,
+  public static void mutateMetaTable(final Connection connection,
                                      final List<Mutation> mutations)
     throws IOException {
-    Table t = getMetaHTable(hConnection);
+    Table t = getMetaHTable(connection);
     try {
       t.batch(mutations);
     } catch (InterruptedException e) {
@@ -1051,14 +1051,14 @@ public class MetaTableAccessor {
 
   /**
    * Adds a hbase:meta row for the specified new region.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfo region information
    * @throws IOException if problem connecting or updating meta
    */
-  public static void addRegionToMeta(HConnection hConnection,
+  public static void addRegionToMeta(Connection connection,
                                      HRegionInfo regionInfo)
     throws IOException {
-    putToMetaTable(hConnection, makePutFromRegionInfo(regionInfo));
+    putToMetaTable(connection, makePutFromRegionInfo(regionInfo));
     LOG.info("Added " + regionInfo.getRegionNameAsString());
   }
 
@@ -1077,7 +1077,7 @@ public class MetaTableAccessor {
    * Adds a (single) hbase:meta row for the specified new region and its 
daughters. Note that this
    * does not add its daughter's as different rows, but adds information about 
the daughters
    * in the same row as the parent. Use
-   * {@link #splitRegion(org.apache.hadoop.hbase.client.HConnection,
+   * {@link #splitRegion(org.apache.hadoop.hbase.client.Connection,
    *   HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
    * if you want to do that.
    * @param meta the HTable for META
@@ -1100,17 +1100,17 @@ public class MetaTableAccessor {
    * Adds a (single) hbase:meta row for the specified new region and its 
daughters. Note that this
    * does not add its daughter's as different rows, but adds information about 
the daughters
    * in the same row as the parent. Use
-   * {@link #splitRegion(HConnection, HRegionInfo, HRegionInfo, HRegionInfo, 
ServerName)}
+   * {@link #splitRegion(Connection, HRegionInfo, HRegionInfo, HRegionInfo, 
ServerName)}
    * if you want to do that.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfo region information
    * @param splitA first split daughter of the parent regionInfo
    * @param splitB second split daughter of the parent regionInfo
    * @throws IOException if problem connecting or updating meta
    */
-  public static void addRegionToMeta(HConnection hConnection, HRegionInfo 
regionInfo,
+  public static void addRegionToMeta(Connection connection, HRegionInfo 
regionInfo,
                                      HRegionInfo splitA, HRegionInfo splitB) 
throws IOException {
-    Table meta = getMetaHTable(hConnection);
+    Table meta = getMetaHTable(connection);
     try {
       addRegionToMeta(meta, regionInfo, splitA, splitB);
     } finally {
@@ -1120,11 +1120,11 @@ public class MetaTableAccessor {
 
   /**
    * Adds a hbase:meta row for each of the specified new regions.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfos region information list
    * @throws IOException if problem connecting or updating meta
    */
-  public static void addRegionsToMeta(HConnection hConnection,
+  public static void addRegionsToMeta(Connection connection,
                                       List<HRegionInfo> regionInfos)
     throws IOException {
     List<Put> puts = new ArrayList<Put>();
@@ -1133,7 +1133,7 @@ public class MetaTableAccessor {
         puts.add(makePutFromRegionInfo(regionInfo));
       }
     }
-    putsToMetaTable(hConnection, puts);
+    putsToMetaTable(connection, puts);
     LOG.info("Added " + puts.size());
   }
 
@@ -1143,7 +1143,7 @@ public class MetaTableAccessor {
    * @param sn the location of the region
    * @param openSeqNum the latest sequence number obtained when the region was 
open
    */
-  public static void addDaughter(final HConnection hConnection,
+  public static void addDaughter(final Connection connection,
       final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
       throws NotAllMetaRegionsOnlineException, IOException {
     Put put = new Put(regionInfo.getRegionName());
@@ -1151,7 +1151,7 @@ public class MetaTableAccessor {
     if (sn != null) {
       addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
     }
-    putToMetaTable(hConnection, put);
+    putToMetaTable(connection, put);
     LOG.info("Added daughter " + regionInfo.getEncodedName() +
       (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
   }
@@ -1160,16 +1160,16 @@ public class MetaTableAccessor {
    * Merge the two regions into one in an atomic operation. Deletes the two
    * merging regions in hbase:meta and adds the merged region with the 
information of
    * two merging regions.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param mergedRegion the merged region
    * @param regionA
    * @param regionB
    * @param sn the location of the region
    * @throws IOException
    */
-  public static void mergeRegions(final HConnection hConnection, HRegionInfo 
mergedRegion,
+  public static void mergeRegions(final Connection connection, HRegionInfo 
mergedRegion,
       HRegionInfo regionA, HRegionInfo regionB, ServerName sn) throws 
IOException {
-    Table meta = getMetaHTable(hConnection);
+    Table meta = getMetaHTable(connection);
     try {
       HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
 
@@ -1200,16 +1200,16 @@ public class MetaTableAccessor {
    * region with the information that it is split into two, and also adds
    * the daughter regions. Does not add the location information to the 
daughter
    * regions since they are not open yet.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param parent the parent region which is split
    * @param splitA Split daughter region A
    * @param splitB Split daughter region A
    * @param sn the location of the region
    */
-  public static void splitRegion(final HConnection hConnection,
+  public static void splitRegion(final Connection connection,
                                  HRegionInfo parent, HRegionInfo splitA, 
HRegionInfo splitB,
                                  ServerName sn) throws IOException {
-    Table meta = getMetaHTable(hConnection);
+    Table meta = getMetaHTable(connection);
     try {
       HRegionInfo copyOfParent = new HRegionInfo(parent);
       copyOfParent.setOffline(true);
@@ -1270,15 +1270,15 @@ public class MetaTableAccessor {
    * Uses passed catalog tracker to get a connection to the server hosting
    * hbase:meta and makes edits to that region.
    *
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfo region to update location of
    * @param sn Server name
    * @throws IOException
    */
-  public static void updateRegionLocation(HConnection hConnection,
+  public static void updateRegionLocation(Connection connection,
                                           HRegionInfo regionInfo, ServerName 
sn, long updateSeqNum)
     throws IOException {
-    updateLocation(hConnection, regionInfo, sn, updateSeqNum);
+    updateLocation(connection, regionInfo, sn, updateSeqNum);
   }
 
   /**
@@ -1287,62 +1287,62 @@ public class MetaTableAccessor {
    * Connects to the specified server which should be hosting the specified
    * catalog region name to perform the edit.
    *
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfo region to update location of
    * @param sn Server name
    * @param openSeqNum the latest sequence number obtained when the region was 
open
    * @throws IOException In particular could throw {@link 
java.net.ConnectException}
    * if the server is down on other end.
    */
-  private static void updateLocation(final HConnection hConnection,
+  private static void updateLocation(final Connection connection,
                                      HRegionInfo regionInfo, ServerName sn, 
long openSeqNum)
     throws IOException {
     // region replicas are kept in the primary region's row
     Put put = new Put(getMetaKeyForRegion(regionInfo));
     addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
-    putToMetaTable(hConnection, put);
+    putToMetaTable(connection, put);
     LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
       " with server=" + sn);
   }
 
   /**
    * Deletes the specified region from META.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfo region to be deleted from META
    * @throws IOException
    */
-  public static void deleteRegion(HConnection hConnection,
+  public static void deleteRegion(Connection connection,
                                   HRegionInfo regionInfo)
     throws IOException {
     Delete delete = new Delete(regionInfo.getRegionName());
-    deleteFromMetaTable(hConnection, delete);
+    deleteFromMetaTable(connection, delete);
     LOG.info("Deleted " + regionInfo.getRegionNameAsString());
   }
 
   /**
    * Deletes the specified regions from META.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionsInfo list of regions to be deleted from META
    * @throws IOException
    */
-  public static void deleteRegions(HConnection hConnection,
+  public static void deleteRegions(Connection connection,
                                    List<HRegionInfo> regionsInfo) throws 
IOException {
     List<Delete> deletes = new ArrayList<Delete>(regionsInfo.size());
     for (HRegionInfo hri: regionsInfo) {
       deletes.add(new Delete(hri.getRegionName()));
     }
-    deleteFromMetaTable(hConnection, deletes);
+    deleteFromMetaTable(connection, deletes);
     LOG.info("Deleted " + regionsInfo);
   }
 
   /**
    * Adds and Removes the specified regions from hbase:meta
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionsToRemove list of regions to be deleted from META
    * @param regionsToAdd list of regions to be added to META
    * @throws IOException
    */
-  public static void mutateRegions(HConnection hConnection,
+  public static void mutateRegions(Connection connection,
                                    final List<HRegionInfo> regionsToRemove,
                                    final List<HRegionInfo> regionsToAdd)
     throws IOException {
@@ -1357,7 +1357,7 @@ public class MetaTableAccessor {
         mutation.add(makePutFromRegionInfo(hri));
       }
     }
-    mutateMetaTable(hConnection, mutation);
+    mutateMetaTable(connection, mutation);
     if (regionsToRemove != null && regionsToRemove.size() > 0) {
       LOG.debug("Deleted " + regionsToRemove);
     }
@@ -1368,34 +1368,34 @@ public class MetaTableAccessor {
 
   /**
    * Overwrites the specified regions from hbase:meta
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param regionInfos list of regions to be added to META
    * @throws IOException
    */
-  public static void overwriteRegions(HConnection hConnection,
+  public static void overwriteRegions(Connection connection,
                                       List<HRegionInfo> regionInfos) throws 
IOException {
-    deleteRegions(hConnection, regionInfos);
+    deleteRegions(connection, regionInfos);
     // Why sleep? This is the easiest way to ensure that the previous deletes 
does not
     // eclipse the following puts, that might happen in the same ts from the 
server.
     // See HBASE-9906, and HBASE-9879. Once either HBASE-9879, HBASE-8770 is 
fixed,
     // or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
     Threads.sleep(20);
-    addRegionsToMeta(hConnection, regionInfos);
+    addRegionsToMeta(connection, regionInfos);
     LOG.info("Overwritten " + regionInfos);
   }
 
   /**
    * Deletes merge qualifiers for the specified merged region.
-   * @param hConnection connection we're using
+   * @param connection connection we're using
    * @param mergedRegion
    * @throws IOException
    */
-  public static void deleteMergeQualifiers(HConnection hConnection,
+  public static void deleteMergeQualifiers(Connection connection,
                                            final HRegionInfo mergedRegion) 
throws IOException {
     Delete delete = new Delete(mergedRegion.getRegionName());
     delete.deleteColumns(HConstants.CATALOG_FAMILY, 
HConstants.MERGEA_QUALIFIER);
     delete.deleteColumns(HConstants.CATALOG_FAMILY, 
HConstants.MERGEB_QUALIFIER);
-    deleteFromMetaTable(hConnection, delete);
+    deleteFromMetaTable(connection, delete);
     LOG.info("Deleted references in merged region "
       + mergedRegion.getRegionNameAsString() + ", qualifier="
       + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
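
The net effect of the MetaTableAccessor changes above is that callers hand in the Connection interface rather than HConnection. A hedged fragment (it reuses a Connection created as in the sketch after the file list; "test" is again an assumed table name, and the imports mirror the ones touched in this hunk):

    static void inspectMeta(Connection connection) throws IOException {
      // Both helpers now take the Connection interface directly.
      List<Result> metaRows = MetaTableAccessor.fullScanOfMeta(connection);
      boolean exists = MetaTableAccessor.tableExists(connection, TableName.valueOf("test"));
      System.out.println("hbase:meta rows=" + metaRows.size() + ", 'test' exists=" + exists);
    }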

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index 1f8a313..453a713 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -54,7 +54,7 @@ class ConnectionAdapter implements ClusterConnection {
 
   private final ClusterConnection wrappedConnection;
 
-  public ConnectionAdapter(HConnection c) {
+  public ConnectionAdapter(Connection c) {
     wrappedConnection = (ClusterConnection)c;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index be1cdd9..fe82165 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -214,7 +214,7 @@ public class HBaseAdmin implements Admin {
    * @deprecated Do not use this internal ctor.
    */
   @Deprecated
-  public HBaseAdmin(HConnection connection)
+  public HBaseAdmin(Connection connection)
       throws MasterNotRunningException, ZooKeeperConnectionException {
     this((ClusterConnection)connection);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 8a6575e..0143b7e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -268,14 +268,14 @@ public class HTable implements HTableInterface, 
RegionLocator {
    * @deprecated Do not use, internal ctor.
    */
   @Deprecated
-  public HTable(final byte[] tableName, final HConnection connection,
+  public HTable(final byte[] tableName, final Connection connection,
       final ExecutorService pool) throws IOException {
     this(TableName.valueOf(tableName), connection, pool);
   }
 
   /** @deprecated Do not use, internal ctor. */
   @Deprecated
-  public HTable(TableName tableName, final HConnection connection,
+  public HTable(TableName tableName, final Connection connection,
       final ExecutorService pool) throws IOException {
     this(tableName, (ClusterConnection)connection, pool);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
index aab547e..5604b73 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
@@ -30,7 +30,7 @@ interface Registry {
   /**
    * @param connection
    */
-  void init(HConnection connection);
+  void init(Connection connection);
 
   /**
    * @return Meta region location
@@ -54,4 +54,4 @@ interface Registry {
    * @throws IOException
    */
   int getCurrentNrHRS() throws IOException;
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
index 9123d50..c43b4e2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
@@ -42,7 +42,7 @@ class ZooKeeperRegistry implements Registry {
   ConnectionManager.HConnectionImplementation hci;
 
   @Override
-  public void init(HConnection connection) {
+  public void init(Connection connection) {
     if (!(connection instanceof ConnectionManager.HConnectionImplementation)) {
       throw new RuntimeException("This registry depends on 
HConnectionImplementation");
     }
@@ -128,4 +128,4 @@ class ZooKeeperRegistry implements Registry {
         zkw.close();
     }
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index df5e693..118a664 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -114,7 +114,7 @@ public class TestClientNoCluster extends Configured 
implements Tool {
     final ServerName META_HOST = META_SERVERNAME;
 
     @Override
-    public void init(HConnection connection) {
+    public void init(Connection connection) {
     }
 
     @Override
@@ -699,8 +699,8 @@ public class TestClientNoCluster extends Configured 
implements Tool {
    * @param sharedConnection
    * @throws IOException
    */
-  static void cycle(int id, final Configuration c, final HConnection 
sharedConnection) throws IOException {
-    Table table = sharedConnection.getTable(BIG_USER_TABLE);
+  static void cycle(int id, final Configuration c, final Connection 
sharedConnection) throws IOException {
+    Table table = sharedConnection.getTable(TableName.valueOf(BIG_USER_TABLE));
     table.setAutoFlushTo(false);
     long namespaceSpan = c.getLong("hbase.test.namespace.span", 1000000);
     long startTime = System.currentTimeMillis();
@@ -777,7 +777,7 @@ public class TestClientNoCluster extends Configured 
implements Tool {
     final ExecutorService pool = 
Executors.newCachedThreadPool(Threads.getNamedThreadFactory("p"));
       // Executors.newFixedThreadPool(servers * 10, 
Threads.getNamedThreadFactory("p"));
     // Share a connection so I can keep counts in the 'server' on concurrency.
-    final HConnection sharedConnection = 
HConnectionManager.createConnection(getConf()/*, pool*/);
+    final Connection sharedConnection = 
ConnectionFactory.createConnection(getConf()/*, pool*/);
     try {
       Thread [] ts = new Thread[clients];
       for (int j = 0; j < ts.length; j++) {
@@ -813,4 +813,4 @@ public class TestClientNoCluster extends Configured 
implements Tool {
   public static void main(String[] args) throws Exception {
     System.exit(ToolRunner.run(HBaseConfiguration.create(), new 
TestClientNoCluster(), args));
   }
-}
\ No newline at end of file
+}
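
The cycle() change above shows the one behavioural wrinkle in this file: the Connection interface exposes getTable(TableName) rather than getTable(byte[]), hence the TableName.valueOf(...) wrapping. A small illustrative fragment of the same pattern (method and parameter names are made up for this sketch):

    static void readOneRow(Connection sharedConnection, byte[] tableNameBytes) throws IOException {
      // Wrap the raw byte[] name in a TableName before asking the Connection for a Table.
      Table table = sharedConnection.getTable(TableName.valueOf(tableNameBytes));
      try {
        Result r = table.get(new Get(Bytes.toBytes("row1")));
        System.out.println("row1 empty? " + r.isEmpty());
      } finally {
        table.close();
      }
    }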

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
index cf31fec..d6f1b67 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
 import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan;
@@ -55,7 +55,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
   private static final Log LOG = 
LogFactory.getLog(SnapshotOfRegionAssignmentFromMeta.class
       .getName());
 
-  private final HConnection hConnection;
+  private final Connection connection;
 
   /** the table name to region map */
   private final Map<TableName, List<HRegionInfo>> tableToRegionMap;
@@ -72,13 +72,13 @@ public class SnapshotOfRegionAssignmentFromMeta {
   private final Set<TableName> disabledTables;
   private final boolean excludeOfflinedSplitParents;
 
-  public SnapshotOfRegionAssignmentFromMeta(HConnection hConnection) {
-    this(hConnection, new HashSet<TableName>(), false);
+  public SnapshotOfRegionAssignmentFromMeta(Connection connection) {
+    this(connection, new HashSet<TableName>(), false);
   }
 
-  public SnapshotOfRegionAssignmentFromMeta(HConnection hConnection, 
Set<TableName> disabledTables,
+  public SnapshotOfRegionAssignmentFromMeta(Connection connection, 
Set<TableName> disabledTables,
       boolean excludeOfflinedSplitParents) {
-    this.hConnection = hConnection;
+    this.connection = connection;
     tableToRegionMap = new HashMap<TableName, List<HRegionInfo>>();
     regionToRegionServerMap = new HashMap<HRegionInfo, ServerName>();
     regionServerToRegionMap = new HashMap<ServerName, List<HRegionInfo>>();
@@ -141,7 +141,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
       }
     };
     // Scan hbase:meta to pick up user regions
-    MetaTableAccessor.fullScan(hConnection, v);
+    MetaTableAccessor.fullScan(connection, v);
     //regionToRegionServerMap = regions;
     LOG.info("Finished to scan the hbase:meta for the current region 
assignment" +
       "snapshot");

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
index 83f75bd..01c1f89 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
@@ -87,12 +87,12 @@ public class FavoredNodeAssignmentHelper {
   /**
    * Update meta table with favored nodes info
    * @param regionToFavoredNodes map of HRegionInfo's to their favored nodes
-   * @param hConnection HConnection to be used
+   * @param connection connection to be used
    * @throws IOException
    */
   public static void updateMetaWithFavoredNodesInfo(
       Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
-      HConnection hConnection) throws IOException {
+      Connection connection) throws IOException {
     List<Put> puts = new ArrayList<Put>();
     for (Map.Entry<HRegionInfo, List<ServerName>> entry : 
regionToFavoredNodes.entrySet()) {
       Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue());
@@ -100,7 +100,7 @@ public class FavoredNodeAssignmentHelper {
         puts.add(put);
       }
     }
-    MetaTableAccessor.putsToMetaTable(hConnection, puts);
+    MetaTableAccessor.putsToMetaTable(connection, puts);
     LOG.info("Added " + puts.size() + " regions in META");
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
index 73cda7e..ff074e8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.executor.EventType;
@@ -109,7 +109,7 @@ public class RestoreSnapshotHandler extends 
TableEventHandler implements Snapsho
   @Override
   protected void handleTableOperation(List<HRegionInfo> hris) throws 
IOException {
     MasterFileSystem fileSystemManager = masterServices.getMasterFileSystem();
-    HConnection conn = masterServices.getShortCircuitConnection();
+    Connection conn = masterServices.getShortCircuitConnection();
     FileSystem fs = fileSystemManager.getFileSystem();
     Path rootDir = fileSystemManager.getRootDir();
     TableName tableName = hTableDescriptor.getTableName();

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index cab3947..7ed7bec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -42,9 +42,9 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Row;
@@ -71,7 +71,7 @@ public class ReplicationSink {
 
   private static final Log LOG = LogFactory.getLog(ReplicationSink.class);
   private final Configuration conf;
-  private final HConnection sharedHtableCon;
+  private final Connection sharedHtableCon;
   private final MetricsSink metrics;
   private final AtomicLong totalReplicatedEdits = new AtomicLong();
 
@@ -87,7 +87,7 @@ public class ReplicationSink {
     this.conf = HBaseConfiguration.create(conf);
     decorateConf();
     this.metrics = new MetricsSink();
-    this.sharedHtableCon = HConnectionManager.createConnection(this.conf);
+    this.sharedHtableCon = ConnectionFactory.createConnection(this.conf);
   }
 
   /**
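
ReplicationSink keeps a single long-lived connection for its whole lifetime, and the hunk above only swaps the deprecated HConnectionManager factory for ConnectionFactory. A rough sketch of that ownership pattern, with a hypothetical class name that is not part of this patch:

    class SharedConnectionHolder {  // hypothetical, for illustration only
      private final Connection sharedConnection;

      SharedConnectionHolder(Configuration conf) throws IOException {
        // ConnectionFactory replaces the deprecated HConnectionManager.createConnection call.
        this.sharedConnection = ConnectionFactory.createConnection(conf);
      }

      Table getTable(TableName name) throws IOException {
        return sharedConnection.getTable(name);  // caller is responsible for closing the Table
      }

      void stop() throws IOException {
        sharedConnection.close();  // release the shared connection exactly once
      }
    }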

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index ef95fd9..f28125e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.Reference;
@@ -313,13 +313,13 @@ public class RestoreSnapshotHelper {
       regionsToRestore.add(hri);
     }
 
-    public void updateMetaParentRegions(HConnection hConnection,
+    public void updateMetaParentRegions(Connection connection,
         final List<HRegionInfo> regionInfos) throws IOException {
       if (regionInfos == null || parentsMap.isEmpty()) return;
 
       // Extract region names and offlined regions
       Map<String, HRegionInfo> regionsByName = new HashMap<String, 
HRegionInfo>(regionInfos.size());
-      List<HRegionInfo> parentRegions = new LinkedList();
+      List<HRegionInfo> parentRegions = new LinkedList<>();
       for (HRegionInfo regionInfo: regionInfos) {
         if (regionInfo.isSplitParent()) {
           parentRegions.add(regionInfo);
@@ -344,7 +344,7 @@ public class RestoreSnapshotHelper {
         }
 
         LOG.debug("Update splits parent " + regionInfo.getEncodedName() + " -> 
" + daughters);
-        MetaTableAccessor.addRegionToMeta(hConnection, regionInfo,
+        MetaTableAccessor.addRegionToMeta(connection, regionInfo,
           regionsByName.get(daughters.getFirst()),
           regionsByName.get(daughters.getSecond()));
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
index 67cef70..27b0048 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
@@ -34,6 +34,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.Row;
@@ -74,7 +75,7 @@ public class MultiHConnection {
     if (hConnections != null) {
       synchronized (hConnections) {
         if (hConnections != null) {
-          for (HConnection conn : hConnections) {
+          for (Connection conn : hConnections) {
             if (conn != null) {
               try {
                 conn.close();

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index d88fe32..48b5612 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -55,12 +55,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -3276,7 +3277,7 @@ public class HBaseTestingUtility extends 
HBaseCommonTestingUtility {
     }
 
     int totalNumberOfRegions = 0;
-    HConnection unmanagedConnection = 
HConnectionManager.createConnection(conf);
+    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
     Admin admin = unmanagedConnection.getAdmin();
 
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 0c47a9e..28324d9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -51,12 +51,12 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -257,7 +257,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       ObjectMapper mapper = new ObjectMapper();
       TestOptions opts = mapper.readValue(value.toString(), TestOptions.class);
       Configuration conf = 
HBaseConfiguration.create(context.getConfiguration());
-      final HConnection con = HConnectionManager.createConnection(conf);
+      final Connection con = ConnectionFactory.createConnection(conf);
 
       // Evaluation task
       long elapsedTime = runOneClient(this.cmd, conf, con, opts, status);
@@ -381,7 +381,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     long[] timings = new long[opts.numClientThreads];
     ExecutorService pool = Executors.newFixedThreadPool(opts.numClientThreads,
       new ThreadFactoryBuilder().setNameFormat("TestClient-%s").build());
-    final HConnection con = HConnectionManager.createConnection(conf);
+    final Connection con = ConnectionFactory.createConnection(conf);
     for (int i = 0; i < threads.length; i++) {
       final int index = i;
       threads[i] = pool.submit(new Callable<Long>() {
@@ -891,7 +891,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     private final Status status;
     private final Sampler<?> traceSampler;
     private final SpanReceiverHost receiverHost;
-    protected HConnection connection;
+    protected Connection connection;
     protected Table table;
 
     private String testName;
@@ -903,7 +903,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
      * Note that all subclasses of this class must provide a public constructor
      * that has the exact same list of arguments.
      */
-    Test(final HConnection con, final TestOptions options, final Status 
status) {
+    Test(final Connection con, final TestOptions options, final Status status) 
{
       this.connection = con;
       this.conf = con.getConfiguration();
       this.opts = options;
@@ -964,7 +964,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     void testSetup() throws IOException {
       if (!opts.oneCon) {
-        this.connection = HConnectionManager.createConnection(conf);
+        this.connection = ConnectionFactory.createConnection(conf);
       }
       this.table = new HTable(TableName.valueOf(opts.tableName), connection);
       this.table.setAutoFlushTo(opts.autoFlush);
@@ -1099,7 +1099,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class RandomSeekScanTest extends Test {
-    RandomSeekScanTest(HConnection con, TestOptions options, Status status) {
+    RandomSeekScanTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1129,7 +1129,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static abstract class RandomScanWithRangeTest extends Test {
-    RandomScanWithRangeTest(HConnection con, TestOptions options, Status 
status) {
+    RandomScanWithRangeTest(Connection con, TestOptions options, Status 
status) {
       super(con, options, status);
     }
 
@@ -1173,7 +1173,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class RandomScanWithRange10Test extends RandomScanWithRangeTest {
-    RandomScanWithRange10Test(HConnection con, TestOptions options, Status 
status) {
+    RandomScanWithRange10Test(Connection con, TestOptions options, Status 
status) {
       super(con, options, status);
     }
 
@@ -1184,7 +1184,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class RandomScanWithRange100Test extends RandomScanWithRangeTest {
-    RandomScanWithRange100Test(HConnection con, TestOptions options, Status 
status) {
+    RandomScanWithRange100Test(Connection con, TestOptions options, Status 
status) {
       super(con, options, status);
     }
 
@@ -1195,7 +1195,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class RandomScanWithRange1000Test extends RandomScanWithRangeTest {
-    RandomScanWithRange1000Test(HConnection con, TestOptions options, Status 
status) {
+    RandomScanWithRange1000Test(Connection con, TestOptions options, Status 
status) {
       super(con, options, status);
     }
 
@@ -1206,7 +1206,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class RandomScanWithRange10000Test extends RandomScanWithRangeTest {
-    RandomScanWithRange10000Test(HConnection con, TestOptions options, Status 
status) {
+    RandomScanWithRange10000Test(Connection con, TestOptions options, Status 
status) {
       super(con, options, status);
     }
 
@@ -1221,7 +1221,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     private ArrayList<Get> gets;
     private Random rd = new Random();
 
-    RandomReadTest(HConnection con, TestOptions options, Status status) {
+    RandomReadTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
       consistency = options.replicas == DEFAULT_OPTS.replicas ? null : 
Consistency.TIMELINE;
       if (opts.multiGet > 0) {
@@ -1271,7 +1271,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class RandomWriteTest extends Test {
-    RandomWriteTest(HConnection con, TestOptions options, Status status) {
+    RandomWriteTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1303,7 +1303,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   static class ScanTest extends Test {
     private ResultScanner testScanner;
 
-    ScanTest(HConnection con, TestOptions options, Status status) {
+    ScanTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1334,7 +1334,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class SequentialReadTest extends Test {
-    SequentialReadTest(HConnection con, TestOptions options, Status status) {
+    SequentialReadTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1350,7 +1350,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   static class SequentialWriteTest extends Test {
-    SequentialWriteTest(HConnection con, TestOptions options, Status status) {
+    SequentialWriteTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1382,7 +1382,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   static class FilteredScanTest extends Test {
     protected static final Log LOG = 
LogFactory.getLog(FilteredScanTest.class.getName());
 
-    FilteredScanTest(HConnection con, TestOptions options, Status status) {
+    FilteredScanTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
     }
 
@@ -1492,7 +1492,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     return format(random.nextInt(Integer.MAX_VALUE) % totalRows);
   }
 
-  static long runOneClient(final Class<? extends Test> cmd, Configuration 
conf, HConnection con,
+  static long runOneClient(final Class<? extends Test> cmd, Configuration 
conf, Connection con,
                            TestOptions opts, final Status status)
       throws IOException, InterruptedException {
     status.setStatus("Start " + cmd + " at offset " + opts.startRow + " for " +
@@ -1502,7 +1502,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     final Test t;
     try {
       Constructor<? extends Test> constructor =
-        cmd.getDeclaredConstructor(HConnection.class, TestOptions.class, 
Status.class);
+        cmd.getDeclaredConstructor(Connection.class, TestOptions.class, 
Status.class);
       t = constructor.newInstance(con, opts, status);
     } catch (NoSuchMethodException e) {
       throw new IllegalArgumentException("Invalid command class: " +

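One consequence worth calling out: because every Test constructor now takes a Connection, the reflective lookup above has to key on Connection.class, otherwise getDeclaredConstructor throws NoSuchMethodException. The sketch below shows that mechanism in isolation; the Test and RandomReadTest classes here are simplified stand-ins (no TestOptions or Status parameters), not PerformanceEvaluation's own code.

import java.lang.reflect.Constructor;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ReflectiveTestSketch {

  // Stand-in for PerformanceEvaluation.Test: every subclass must expose a
  // constructor with exactly this parameter list for the reflection to work.
  abstract static class Test {
    protected final Connection connection;
    Test(Connection con) {
      this.connection = con;
    }
    abstract void run() throws Exception;
  }

  static class RandomReadTest extends Test {
    RandomReadTest(Connection con) {
      super(con);
    }
    @Override
    void run() {
      System.out.println("would issue random reads via " + connection);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Connection con = ConnectionFactory.createConnection(conf);
    try {
      Class<? extends Test> cmd = RandomReadTest.class;
      // Key the lookup on the interface type, mirroring the patched
      // getDeclaredConstructor(Connection.class, ...) call.
      Constructor<? extends Test> constructor = cmd.getDeclaredConstructor(Connection.class);
      Test t = constructor.newInstance(con);
      t.run();
    } finally {
      con.close();
    }
  }
}
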
http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
index aec8bdc..79adf2b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
@@ -32,8 +32,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
@@ -52,7 +53,7 @@ import org.junit.experimental.categories.Category;
 public class TestMetaTableAccessor {
   private static final Log LOG = 
LogFactory.getLog(TestMetaTableAccessor.class);
   private static final  HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static HConnection hConnection;
+  private static Connection connection;
 
   @BeforeClass public static void beforeClass() throws Exception {
     UTIL.startMiniCluster(3);
@@ -62,7 +63,7 @@ public class TestMetaTableAccessor {
     // responsive.  1 second is default as is ten retries.
     c.setLong("hbase.client.pause", 1000);
     c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
-    hConnection = HConnectionManager.getConnection(c);
+    connection = HConnectionManager.getConnection(c);
   }
 
   @AfterClass public static void afterClass() throws Exception {
@@ -70,7 +71,7 @@ public class TestMetaTableAccessor {
   }
 
   /**
-   * Does {@link MetaTableAccessor#getRegion(HConnection, byte[])} and a write
+   * Does {@link MetaTableAccessor#getRegion(Connection, byte[])} and a write
    * against hbase:meta while its hosted server is restarted to prove our 
retrying
    * works.
    * @throws IOException
@@ -85,18 +86,18 @@ public class TestMetaTableAccessor {
     int regionCount = UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
     // Test it works getting a region from just made user table.
     final List<HRegionInfo> regions =
-      testGettingTableRegions(hConnection, name, regionCount);
-    MetaTask reader = new MetaTask(hConnection, "reader") {
+      testGettingTableRegions(connection, name, regionCount);
+    MetaTask reader = new MetaTask(connection, "reader") {
       @Override
       void metaTask() throws Throwable {
-        testGetRegion(hConnection, regions.get(0));
+        testGetRegion(connection, regions.get(0));
         LOG.info("Read " + regions.get(0).getEncodedName());
       }
     };
-    MetaTask writer = new MetaTask(hConnection, "writer") {
+    MetaTask writer = new MetaTask(connection, "writer") {
       @Override
       void metaTask() throws Throwable {
-        MetaTableAccessor.addRegionToMeta(hConnection, regions.get(0));
+        MetaTableAccessor.addRegionToMeta(connection, regions.get(0));
         LOG.info("Wrote " + regions.get(0).getEncodedName());
       }
     };
@@ -153,11 +154,11 @@ public class TestMetaTableAccessor {
     boolean stop = false;
     int count = 0;
     Throwable t = null;
-    final HConnection hConnection;
+    final Connection connection;
 
-    MetaTask(final HConnection hConnection, final String name) {
+    MetaTask(final Connection connection, final String name) {
       super(name);
-      this.hConnection = hConnection;
+      this.connection = connection;
     }
 
     @Override
@@ -199,23 +200,23 @@ public class TestMetaTableAccessor {
   throws IOException, InterruptedException {
     List<HRegionInfo> regions =
       MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(),
-        hConnection, TableName.META_TABLE_NAME);
+        connection, TableName.META_TABLE_NAME);
     assertTrue(regions.size() >= 1);
     
assertTrue(MetaTableAccessor.getTableRegionsAndLocations(UTIL.getZooKeeperWatcher(),
-      hConnection,TableName.META_TABLE_NAME).size() >= 1);
+      connection,TableName.META_TABLE_NAME).size() >= 1);
   }
 
   @Test public void testTableExists() throws IOException {
     final TableName name =
         TableName.valueOf("testTableExists");
-    assertFalse(MetaTableAccessor.tableExists(hConnection, name));
+    assertFalse(MetaTableAccessor.tableExists(connection, name));
     UTIL.createTable(name, HConstants.CATALOG_FAMILY);
-    assertTrue(MetaTableAccessor.tableExists(hConnection, name));
+    assertTrue(MetaTableAccessor.tableExists(connection, name));
     Admin admin = UTIL.getHBaseAdmin();
     admin.disableTable(name);
     admin.deleteTable(name);
-    assertFalse(MetaTableAccessor.tableExists(hConnection, name));
-    assertTrue(MetaTableAccessor.tableExists(hConnection,
+    assertFalse(MetaTableAccessor.tableExists(connection, name));
+    assertTrue(MetaTableAccessor.tableExists(connection,
       TableName.META_TABLE_NAME));
   }
 
@@ -224,7 +225,7 @@ public class TestMetaTableAccessor {
     LOG.info("Started " + name);
     // Test get on non-existent region.
     Pair<HRegionInfo, ServerName> pair =
-      MetaTableAccessor.getRegion(hConnection, 
Bytes.toBytes("nonexistent-region"));
+      MetaTableAccessor.getRegion(connection, 
Bytes.toBytes("nonexistent-region"));
     assertNull(pair);
     LOG.info("Finished " + name);
   }
@@ -250,29 +251,29 @@ public class TestMetaTableAccessor {
     // Now make sure we only get the regions from 1 of the tables at a time
 
     assertEquals(1, 
MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(),
-      hConnection, name).size());
+      connection, name).size());
     assertEquals(1, 
MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(),
-      hConnection, greaterName).size());
+      connection, greaterName).size());
   }
 
-  private static List<HRegionInfo> testGettingTableRegions(final HConnection 
hConnection,
+  private static List<HRegionInfo> testGettingTableRegions(final Connection 
connection,
       final TableName name, final int regionCount)
   throws IOException, InterruptedException {
     List<HRegionInfo> regions = 
MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(),
-      hConnection, name);
+      connection, name);
     assertEquals(regionCount, regions.size());
     Pair<HRegionInfo, ServerName> pair =
-      MetaTableAccessor.getRegion(hConnection, regions.get(0).getRegionName());
+      MetaTableAccessor.getRegion(connection, regions.get(0).getRegionName());
     assertEquals(regions.get(0).getEncodedName(),
       pair.getFirst().getEncodedName());
     return regions;
   }
 
-  private static void testGetRegion(final HConnection hConnection,
+  private static void testGetRegion(final Connection connection,
       final HRegionInfo region)
   throws IOException, InterruptedException {
     Pair<HRegionInfo, ServerName> pair =
-      MetaTableAccessor.getRegion(hConnection, region.getRegionName());
+      MetaTableAccessor.getRegion(connection, region.getRegionName());
     assertEquals(region.getEncodedName(),
       pair.getFirst().getEncodedName());
   }
@@ -333,22 +334,22 @@ public class TestMetaTableAccessor {
     long seqNum100 = random.nextLong();
 
 
-    Table meta = MetaTableAccessor.getMetaHTable(hConnection);
+    Table meta = MetaTableAccessor.getMetaHTable(connection);
     try {
-      MetaTableAccessor.updateRegionLocation(hConnection, primary, 
serverName0, seqNum0);
+      MetaTableAccessor.updateRegionLocation(connection, primary, serverName0, 
seqNum0);
 
       // assert that the server, startcode and seqNum columns are there for 
the primary region
       assertMetaLocation(meta, primary.getRegionName(), serverName0, seqNum0, 
0, true);
 
       // add replica = 1
-      MetaTableAccessor.updateRegionLocation(hConnection, replica1, 
serverName1, seqNum1);
+      MetaTableAccessor.updateRegionLocation(connection, replica1, 
serverName1, seqNum1);
       // check whether the primary is still there
       assertMetaLocation(meta, primary.getRegionName(), serverName0, seqNum0, 
0, true);
       // now check for replica 1
       assertMetaLocation(meta, primary.getRegionName(), serverName1, seqNum1, 
1, true);
 
       // add replica = 1
-      MetaTableAccessor.updateRegionLocation(hConnection, replica100, 
serverName100, seqNum100);
+      MetaTableAccessor.updateRegionLocation(connection, replica100, 
serverName100, seqNum100);
       // check whether the primary is still there
       assertMetaLocation(meta, primary.getRegionName(), serverName0, seqNum0, 
0, true);
       // check whether the replica 1 is still there

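Note that this test still obtains its shared connection from HConnectionManager.getConnection(c) but now stores it as the Connection interface, which is all the MetaTableAccessor methods above require. A small sketch of calling one of those methods with a plain, caller-owned connection follows; the table name is hypothetical, and an unmanaged connection is used here only to keep the example self-contained.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MetaAccessSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Connection connection = ConnectionFactory.createConnection(conf);
    try {
      // Any Connection implementation will do; no HConnection cast is needed.
      TableName name = TableName.valueOf("someTable"); // hypothetical table
      boolean exists = MetaTableAccessor.tableExists(connection, name);
      System.out.println(name + " present in hbase:meta: " + exists);
    } finally {
      connection.close();
    }
  }
}
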
http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java
index 1d6ef77..52a158a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java
@@ -77,7 +77,7 @@ public class TestClientTimeouts {
    */
   @Test
   public void testAdminTimeout() throws Exception {
-    HConnection lastConnection = null;
+    Connection lastConnection = null;
     boolean lastFailed = false;
     int initialInvocations = RandomTimeoutBlockingRpcChannel.invokations.get();
     RpcClient rpcClient = newRandomTimeoutRpcClient();
@@ -90,7 +90,7 @@ public class TestClientTimeouts {
         HBaseAdmin admin = null;
         try {
           admin = new HBaseAdmin(conf);
-          HConnection connection = admin.getConnection();
+          Connection connection = admin.getConnection();
           assertFalse(connection == lastConnection);
           lastConnection = connection;
           // Override the connection's rpc client for timeout testing

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index c3b3bbd..69becbd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -4126,7 +4126,7 @@ public class TestFromClientSide {
   public void testUnmanagedHConnectionReconnect() throws Exception {
     final byte[] tableName = 
Bytes.toBytes("testUnmanagedHConnectionReconnect");
     HTable t = createUnmangedHConnectionHTable(tableName);
-    HConnection conn = t.getConnection();
+    Connection conn = t.getConnection();
     HBaseAdmin ha = new HBaseAdmin(conn);
     assertTrue(ha.tableExists(tableName));
     assertTrue(t.get(new Get(ROW)).isEmpty());

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
index 7a818aa..ed26e73 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
@@ -211,7 +211,7 @@ public class TestHCM {
    */
   @Test
   public void testAdminFactory() throws IOException {
-    HConnection con1 = 
HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
+    Connection con1 = 
ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
     Admin admin = con1.getAdmin();
     assertTrue(admin.getConnection() == con1);
     assertTrue(admin.getConfiguration() == TEST_UTIL.getConfiguration());
@@ -778,16 +778,16 @@ public class TestHCM {
   @Test
   public void testConnectionManagement() throws Exception{
     Table table0 = TEST_UTIL.createTable(TABLE_NAME1, FAM_NAM);
-    HConnection conn = 
HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
-    Table table = conn.getTable(TABLE_NAME1.getName());
+    Connection conn = 
ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
+    HTable table = (HTable) conn.getTable(TABLE_NAME1);
     table.close();
     assertFalse(conn.isClosed());
-    assertFalse(((HTable)table).getPool().isShutdown());
-    table = conn.getTable(TABLE_NAME1.getName());
+    assertFalse(table.getPool().isShutdown());
+    table = (HTable) conn.getTable(TABLE_NAME1);
     table.close();
-    assertFalse(((HTable)table).getPool().isShutdown());
+    assertFalse(table.getPool().isShutdown());
     conn.close();
-    assertTrue(((HTable)table).getPool().isShutdown());
+    assertTrue(table.getPool().isShutdown());
     table0.close();
   }
 
@@ -845,14 +845,14 @@ public class TestHCM {
    */
   @Test
   public void testConnectionSameness() throws Exception {
-    HConnection previousConnection = null;
+    Connection previousConnection = null;
     for (int i = 0; i < 2; i++) {
       // set random key to differentiate the connection from previous ones
       Configuration configuration = TEST_UTIL.getConfiguration();
       configuration.set("some_key", String.valueOf(_randy.nextInt()));
       LOG.info("The hash code of the current configuration is: "
           + configuration.hashCode());
-      HConnection currentConnection = HConnectionManager
+      Connection currentConnection = HConnectionManager
           .getConnection(configuration);
       if (previousConnection != null) {
         assertTrue(
@@ -883,7 +883,7 @@ public class TestHCM {
     // to set up a session and test runs for a long time.
     int maxConnections = Math.min(zkmaxconnections - 1, 20);
     List<HConnection> connections = new ArrayList<HConnection>(maxConnections);
-    HConnection previousConnection = null;
+    Connection previousConnection = null;
     try {
       for (int i = 0; i < maxConnections; i++) {
         // set random key to differentiate the connection from previous ones
@@ -912,7 +912,7 @@ public class TestHCM {
         connections.add(currentConnection);
       }
     } finally {
-      for (HConnection c: connections) {
+      for (Connection c: connections) {
         // Clean up connections made so we don't interfere w/ subsequent tests.
         HConnectionManager.deleteConnection(c.getConfiguration());
       }
@@ -926,12 +926,12 @@ public class TestHCM {
     configuration.set(HConstants.HBASE_CLIENT_INSTANCE_ID,
         String.valueOf(_randy.nextInt()));
 
-    HConnection c1 = HConnectionManager.createConnection(configuration);
+    Connection c1 = ConnectionFactory.createConnection(configuration);
     // We create two connections with the same key.
-    HConnection c2 = HConnectionManager.createConnection(configuration);
+    Connection c2 = ConnectionFactory.createConnection(configuration);
 
-    HConnection c3 = HConnectionManager.getConnection(configuration);
-    HConnection c4 = HConnectionManager.getConnection(configuration);
+    Connection c3 = HConnectionManager.getConnection(configuration);
+    Connection c4 = HConnectionManager.getConnection(configuration);
     assertTrue(c3 == c4);
 
     c1.close();
@@ -945,7 +945,7 @@ public class TestHCM {
     c3.close();
     assertTrue(c3.isClosed());
     // c3 was removed from the cache
-    HConnection c5 = HConnectionManager.getConnection(configuration);
+    Connection c5 = HConnectionManager.getConnection(configuration);
     assertTrue(c5 != c3);
 
     assertFalse(c2.isClosed());
@@ -962,13 +962,13 @@ public class TestHCM {
   @Test
   public void testCreateConnection() throws Exception {
     Configuration configuration = TEST_UTIL.getConfiguration();
-    HConnection c1 = HConnectionManager.createConnection(configuration);
-    HConnection c2 = HConnectionManager.createConnection(configuration);
+    Connection c1 = ConnectionFactory.createConnection(configuration);
+    Connection c2 = ConnectionFactory.createConnection(configuration);
     // created from the same configuration, yet they are different
     assertTrue(c1 != c2);
     assertTrue(c1.getConfiguration() == c2.getConfiguration());
     // make sure these were not cached
-    HConnection c3 = HConnectionManager.getConnection(configuration);
+    Connection c3 = HConnectionManager.getConnection(configuration);
     assertTrue(c1 != c3);
     assertTrue(c2 != c3);
   }
@@ -1229,7 +1229,7 @@ public class TestHCM {
 
     // Use connection multiple times.
     for (int i = 0; i < 30; i++) {
-      HConnection c1 = null;
+      Connection c1 = null;
       try {
         c1 = ConnectionManager.getConnectionInternal(config);
         LOG.info("HTable connection " + i + " " + c1);
@@ -1272,7 +1272,7 @@ public class TestHCM {
     TableName tableName = 
TableName.valueOf("testConnectionRideOverClusterRestart");
     TEST_UTIL.createTable(tableName.getName(), new byte[][] {FAM_NAM}, 
config).close();
 
-    HConnection connection = HConnectionManager.createConnection(config);
+    Connection connection = ConnectionFactory.createConnection(config);
     Table table = connection.getTable(tableName);
 
     // this will cache the meta location and table's region location

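The assertions above rest on the factory's contract: ConnectionFactory.createConnection always returns a new, caller-owned connection, while the deprecated HConnectionManager.getConnection still hands out the cached, shared one for a given Configuration. A short sketch of the createConnection side of that contract, assuming a reachable cluster; none of this is TestHCM code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectionIdentitySketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Two calls, two independent connections built from the same Configuration.
    Connection c1 = ConnectionFactory.createConnection(conf);
    Connection c2 = ConnectionFactory.createConnection(conf);
    try {
      System.out.println("distinct instances: " + (c1 != c2));
      System.out.println("shared configuration: " + (c1.getConfiguration() == c2.getConfiguration()));
    } finally {
      c1.close();
      c2.close();
    }
    System.out.println("c1 closed: " + c1.isClosed());
  }
}
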
http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
index 8cecbc5..27848ef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -44,10 +44,9 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
-import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
@@ -288,7 +287,7 @@ public class TestMasterOperationsForRegionReplicas {
   }
 
   private void validateNumberOfRowsInMeta(final TableName table, int 
numRegions,
-      HConnection hConnection) throws IOException {
+      Connection connection) throws IOException {
     assert(admin.tableExists(table));
     final AtomicInteger count = new AtomicInteger();
     Visitor visitor = new Visitor() {
@@ -298,14 +297,14 @@ public class TestMasterOperationsForRegionReplicas {
         return true;
       }
     };
-    MetaTableAccessor.fullScan(hConnection, visitor);
+    MetaTableAccessor.fullScan(connection, visitor);
     assert(count.get() == numRegions);
   }
 
   private void validateFromSnapshotFromMeta(HBaseTestingUtility util, 
TableName table,
-      int numRegions, int numReplica, HConnection hConnection) throws 
IOException {
+      int numRegions, int numReplica, Connection connection) throws 
IOException {
     SnapshotOfRegionAssignmentFromMeta snapshot = new 
SnapshotOfRegionAssignmentFromMeta(
-      hConnection);
+      connection);
     snapshot.initialize();
     Map<HRegionInfo, ServerName> regionToServerMap = 
snapshot.getRegionToRegionServerMap();
     assert(regionToServerMap.size() == numRegions * numReplica + 1); //'1' for 
the namespace
@@ -329,10 +328,10 @@ public class TestMasterOperationsForRegionReplicas {
     }
   }
 
-  private void validateSingleRegionServerAssignment(HConnection hConnection, 
int numRegions,
+  private void validateSingleRegionServerAssignment(Connection connection, int 
numRegions,
       int numReplica) throws IOException {
     SnapshotOfRegionAssignmentFromMeta snapshot = new 
SnapshotOfRegionAssignmentFromMeta(
-      hConnection);
+      connection);
     snapshot.initialize();
     Map<HRegionInfo, ServerName>  regionToServerMap = 
snapshot.getRegionToRegionServerMap();
     assertEquals(regionToServerMap.size(), numRegions * numReplica + 1); //'1' 
for the namespace

http://git-wip-us.apache.org/repos/asf/hbase/blob/669bc49b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index a6e3d71..7a79d2d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -51,10 +51,10 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
@@ -2013,7 +2013,7 @@ public class TestAccessController extends SecureTestUtil {
     AccessTestAction deleteTableAction = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        HConnection unmanagedConnection = 
HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
+        Connection unmanagedConnection = 
ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
         Admin admin = unmanagedConnection.getAdmin();
         try {
           admin.disableTable(TEST_TABLE.getTableName());
