Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 e35f7b920 -> b14e2ab1c


http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
index 3da7860..cc2ecdf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.backup.util;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -39,26 +38,21 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory;
 import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.RestoreService;
 import org.apache.hadoop.hbase.backup.RestoreRequest;
+import org.apache.hadoop.hbase.backup.RestoreService;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStore;
@@ -68,7 +62,6 @@ import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.Pair;
 
 /**
  * A collection of methods used by multiple classes to restore HBase tables.
@@ -172,33 +165,21 @@ public class RestoreServerUtil {
     return regionDirList;
   }
   
-  static void modifyTableSync(MasterServices svc, HTableDescriptor desc) throws IOException {
-    svc.modifyTable(desc.getTableName(), desc, HConstants.NO_NONCE, HConstants.NO_NONCE);
-    @SuppressWarnings("serial")
-    Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
-      setFirst(0);
-      setSecond(0);
-    }};
-    int i = 0;
-    do {
-      status = svc.getAssignmentManager().getReopenStatus(desc.getTableName());
-      if (status.getSecond() != 0) {
-        LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
-          + " regions updated.");
-        try {
-          Thread.sleep(1 * 1000l);
-        } catch (InterruptedException ie) {
-          InterruptedIOException iie = new InterruptedIOException();
-          iie.initCause(ie);
-          throw iie;
+  static void modifyTableSync(Connection conn, HTableDescriptor desc) throws IOException {
+    try (Admin admin = conn.getAdmin()) {
+      admin.modifyTable(desc.getTableName(), desc);
+      int attempt = 0;
+      int maxAttempts = 600;
+      while (!admin.isTableAvailable(desc.getTableName())) {
+        Thread.sleep(100);
+        attempt++;
+        if (attempt > maxAttempts) {
+          throw new IOException("Timeout expired " + (maxAttempts * 100) + " ms");
         }
-      } else {
-        LOG.debug("All regions updated.");
-        break;
       }
-    } while (status.getFirst() != 0 && i++ < 500);
-    if (status.getFirst() != 0) {
-      throw new IOException("Failed to update all regions even after 500 seconds.");
+    } catch (Exception e) {
+      throw new IOException(e);
     }
   }
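
The rewritten modifyTableSync above trades the master-internal reopen-status polling for a plain client-side availability poll. A minimal self-contained sketch of that pattern using only the public Admin API (names mirror the diff; this is an illustration, not the committed code):

    static void modifyTableSyncSketch(Connection conn, HTableDescriptor desc)
        throws IOException, InterruptedException {
      try (Admin admin = conn.getAdmin()) {
        admin.modifyTable(desc.getTableName(), desc); // request is asynchronous on the master
        long deadline = System.currentTimeMillis() + 60_000L; // 600 attempts * 100 ms
        while (!admin.isTableAvailable(desc.getTableName())) {
          if (System.currentTimeMillis() > deadline) {
            throw new IOException("Table " + desc.getTableName() + " not available after 60s");
          }
          Thread.sleep(100); // same poll interval as the patch
        }
      }
    }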
 
@@ -206,7 +187,7 @@ public class RestoreServerUtil {
    * During an incremental backup operation, calls WalPlayer to replay the WAL in the backup
    * image. Currently tableNames and newTableNames contain only a single table; this will be
    * expanded to multiple tables in the future.
-   * @param svc MasterServices
+   * @param conn HBase connection
    * @param tableBackupPath backup path
    * @param logDirs : incremental backup folders, which contain WALs
    * @param tableNames : source table names (tables that were backed up)
@@ -214,9 +195,10 @@ public class RestoreServerUtil {
    * @param incrBackupId incremental backup Id
    * @throws IOException exception
    */
-  public void incrementalRestoreTable(MasterServices svc, Path tableBackupPath, Path[] logDirs,
+  public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs,
       TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
 
+    try (Admin admin = conn.getAdmin()) {
     if (tableNames.length != newTableNames.length) {
       throw new IOException("Number of source tables and target tables does not match!");
     }
@@ -225,7 +207,7 @@ public class RestoreServerUtil {
     // for incremental backup image, expect the table already created either by user or previous
     // full backup. Here, check that all new tables exist
     for (TableName tableName : newTableNames) {
-      if (!MetaTableAccessor.tableExists(svc.getConnection(), tableName)) {
+      if (!admin.tableExists(tableName)) {
         throw new IOException("HBase table " + tableName
             + " does not exist. Create the table first, e.g. by restoring a full backup.");
       }
@@ -237,7 +219,7 @@ public class RestoreServerUtil {
       LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
 
       TableName newTableName = newTableNames[i];
-      HTableDescriptor newTableDescriptor = svc.getTableDescriptors().get(newTableName);
+      HTableDescriptor newTableDescriptor = admin.getTableDescriptor(newTableName);
       List<HColumnDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
       List<HColumnDescriptor> existingFamilies =
           Arrays.asList(newTableDescriptor.getColumnFamilies());
@@ -255,7 +237,7 @@ public class RestoreServerUtil {
         }
       }
       if (schemaChangeNeeded) {
-        RestoreServerUtil.modifyTableSync(svc, newTableDescriptor);
+        RestoreServerUtil.modifyTableSync(conn, newTableDescriptor);
         LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " + newTableDescriptor);
       }
     }
@@ -263,12 +245,13 @@ public class RestoreServerUtil {
         BackupRestoreServerFactory.getRestoreService(conf);
 
     restoreService.run(logDirs, tableNames, newTableNames, false);
+    }
   }
 
-  public void fullRestoreTable(MasterServices svc, Path tableBackupPath, TableName tableName,
+  public void fullRestoreTable(Connection conn, Path tableBackupPath, TableName tableName,
       TableName newTableName, boolean truncateIfExists, String lastIncrBackupId)
           throws IOException {
-    restoreTableAndCreate(svc, tableName, newTableName, tableBackupPath, truncateIfExists,
+    restoreTableAndCreate(conn, tableName, newTableName, tableBackupPath, truncateIfExists,
         lastIncrBackupId);
   }
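
With MasterServices gone from these signatures, callers now pass an ordinary client Connection. A hedged call-shape example (restoreUtil, backupPath, and the table names are hypothetical placeholders, not part of the patch):

    // Hypothetical caller of the refactored API.
    restoreUtil.fullRestoreTable(conn, backupPath, tableName, newTableName,
        true /* truncateIfExists */, lastIncrBackupId);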
 
@@ -386,7 +369,7 @@ public class RestoreServerUtil {
     return null;
   }
 
-  private void restoreTableAndCreate(MasterServices svc, TableName tableName,
+  private void restoreTableAndCreate(Connection conn, TableName tableName,
       TableName newTableName, Path tableBackupPath, boolean truncateIfExists,
       String lastIncrBackupId) throws IOException {
     if (newTableName == null || newTableName.equals("")) {
@@ -434,7 +417,7 @@ public class RestoreServerUtil {
               + ", will only create table");
         }
         tableDescriptor.setName(newTableName);
-        checkAndCreateTable(svc, tableBackupPath, tableName, newTableName, null, tableDescriptor,
+        checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor,
           truncateIfExists);
         return;
       } else {
@@ -461,7 +444,7 @@ public class RestoreServerUtil {
 
       // should only try to create the table with all region information, so we can pre-split
       // the regions at fine granularity
-      checkAndCreateTable(svc, tableBackupPath, tableName, newTableName, regionPathList,
+      checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, regionPathList,
         tableDescriptor, truncateIfExists);
 
       // Now get region splits from full backup
@@ -702,18 +685,18 @@ public class RestoreServerUtil {
    * @param htd table descriptor
    * @throws IOException exception
    */
-  private void checkAndCreateTable(MasterServices svc, Path tableBackupPath, TableName tableName,
+  private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableName tableName,
       TableName targetTableName, ArrayList<Path> regionDirList,
       HTableDescriptor htd, boolean truncateIfExists)
           throws IOException {
-    try {
+    try (Admin admin = conn.getAdmin()) {
       boolean createNew = false;
-      if (MetaTableAccessor.tableExists(svc.getConnection(), targetTableName)) {
+      if (admin.tableExists(targetTableName)) {
         if(truncateIfExists) {
           LOG.info("Truncating existing target table '" + targetTableName
             + "', preserving region splits");
-          svc.disableTable(targetTableName, HConstants.NO_NONCE, HConstants.NO_NONCE);
-          svc.truncateTable(targetTableName, true, HConstants.NO_NONCE, HConstants.NO_NONCE);
+          admin.disableTable(targetTableName);
+          admin.truncateTable(targetTableName, true);
         } else {
           LOG.info("Using existing target table '" + targetTableName + "'");
         }
@@ -724,14 +707,14 @@ public class RestoreServerUtil {
         LOG.info("Creating target table '" + targetTableName + "'");
         byte[][] keys = null;
         if (regionDirList == null || regionDirList.size() == 0) {
-          svc.createTable(htd, null, HConstants.NO_NONCE, HConstants.NO_NONCE);
+          admin.createTable(htd, null);
         } else {
           keys = generateBoundaryKeys(regionDirList);
           // create table using table descriptor and region boundaries
-          svc.createTable(htd, keys, HConstants.NO_NONCE, HConstants.NO_NONCE);
+          admin.createTable(htd, keys);
         }
         long startTime = EnvironmentEdgeManager.currentTime();
-        while (!((ClusterConnection)svc.getConnection()).isTableAvailable(targetTableName, keys)) {
+        while (!admin.isTableAvailable(targetTableName, keys)) {
           Thread.sleep(100);
           if (EnvironmentEdgeManager.currentTime() - startTime > TABLE_AVAILABILITY_WAIT_TIME) {
             throw new IOException("Time out "+TABLE_AVAILABILITY_WAIT_TIME+
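
checkAndCreateTable pre-splits the restore target at the original region boundaries, presumably so the bulk-loaded HFiles land in already-existing regions. The essential pattern, sketched with names taken from the diff (conn, regionDirList, htd, and targetTableName assumed in scope):

    try (Admin admin = conn.getAdmin()) {
      if (!admin.tableExists(targetTableName)) {
        byte[][] keys = generateBoundaryKeys(regionDirList); // boundaries recovered from the backup
        admin.createTable(htd, keys); // create the table pre-split at those boundaries
      }
    }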

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index a02f011..97f08e0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -83,9 +83,9 @@ import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest;
 import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.master.FullTableBackupProcedure;
-import org.apache.hadoop.hbase.backup.master.IncrementalTableBackupProcedure;
-import org.apache.hadoop.hbase.backup.master.RestoreTablesProcedure;
+import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
+import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
+import org.apache.hadoop.hbase.backup.impl.RestoreTablesClient;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
@@ -2617,120 +2617,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     return procInfoList;
   }
 
-  @Override
-  public Pair<Long, String> backupTables(final BackupType type,
-        List<TableName> tableList, final String targetRootDir, final int workers,
-        final long bandwidth, final String setName,
-        final long nonceGroup, final long nonce) throws IOException {
-    long procId;
-    String backupId = (setName == null || setName.length() == 0? 
-        BackupRestoreConstants.BACKUPID_PREFIX: setName + "_") + 
-        EnvironmentEdgeManager.currentTime();
-    if (type == BackupType.INCREMENTAL) {
-      Set<TableName> incrTableSet = null;
-      try (BackupSystemTable table = new BackupSystemTable(getConnection())) {
-        incrTableSet = table.getIncrementalBackupTableSet(targetRootDir);
-      }
-         
-      if (incrTableSet.isEmpty()) {
-        LOG.warn("Incremental backup table set contains no table.\n"
-            + "Use 'backup create full' or 'backup stop' to \n "
-            + "change the tables covered by incremental backup.");
-        throw new DoNotRetryIOException("No table covered by incremental backup.");
-      }
-
-      tableList.removeAll(incrTableSet);
-      if (!tableList.isEmpty()) {
-        String extraTables = StringUtils.join(",", tableList);
-        LOG.error("Some tables (" + extraTables + ") haven't gone through full backup");
-        throw new DoNotRetryIOException("Perform full backup on " + extraTables + " first, "
-            + "then retry the command");
-      }
-      LOG.info("Incremental backup for the following table set: " + incrTableSet);
-      tableList = Lists.newArrayList(incrTableSet);
-    }
-    if (tableList != null && !tableList.isEmpty()) {
-      for (TableName table : tableList) {
-        String targetTableBackupDir =
-            HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
-        Path targetTableBackupDirPath = new Path(targetTableBackupDir);
-        FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conf);
-        if (outputFs.exists(targetTableBackupDirPath)) {
-          throw new DoNotRetryIOException("Target backup directory " + targetTableBackupDir
-            + " exists already.");
-        }
-      }
-      ArrayList<TableName> nonExistingTableList = null;
-      for (TableName tableName : tableList) {
-        if (!MetaTableAccessor.tableExists(getConnection(), tableName)) {
-          if (nonExistingTableList == null) {
-            nonExistingTableList = new ArrayList<>();
-          }
-          nonExistingTableList.add(tableName);
-        }
-      }
-      if (nonExistingTableList != null) {
-        if (type == BackupType.INCREMENTAL ) {
-          LOG.warn("Incremental backup table set contains non-exising table: "
-              + nonExistingTableList);
-          // Update incremental backup set 
-          tableList = excludeNonExistingTables(tableList, nonExistingTableList);
-        } else {
-          // Throw exception only in full mode - we try to backup non-existing table
-          throw new DoNotRetryIOException("Non-existing tables found in the table list: "
-              + nonExistingTableList);
-        }
-      }
-    }
-    if (type == BackupType.FULL) {
-      procId = this.procedureExecutor.submitProcedure(
-        new FullTableBackupProcedure(procedureExecutor.getEnvironment(), backupId,
-          tableList, targetRootDir, workers, bandwidth), nonceGroup, nonce);
-    } else {
-      procId = this.procedureExecutor.submitProcedure(
-        new IncrementalTableBackupProcedure(procedureExecutor.getEnvironment(), backupId,
-          tableList, targetRootDir, workers, bandwidth), nonceGroup, nonce);
-    }
-    return new Pair<>(procId, backupId);
-  }
 
-  private List<TableName> excludeNonExistingTables(List<TableName> tableList,
-      List<TableName> nonExistingTableList) {
-    
-    for(TableName table: nonExistingTableList) {
-      tableList.remove(table);
-    }
-    return tableList;
-  }
-
-  @Override
-  public long restoreTables(String backupRootDir, String backupId, boolean check,
-      List<TableName> sTableList, List<TableName> tTableList, boolean isOverwrite,
-      final long nonceGroup, final long nonce) throws IOException {
-    if (check) {
-      HashMap<TableName, BackupManifest> backupManifestMap = new HashMap<>();
-      // check and load backup image manifest for the tables
-      Path rootPath = new Path(backupRootDir);
-      HBackupFileSystem.checkImageManifestExist(backupManifestMap,
-        sTableList.toArray(new TableName[sTableList.size()]),
-        conf, rootPath, backupId);
-
-      // Check and validate the backup image and its dependencies
-      if (check) {
-        if (RestoreServerUtil.validate(backupManifestMap, conf)) {
-          LOG.info("Checking backup images: ok");
-        } else {
-          String errMsg = "Some dependencies are missing for restore";
-          LOG.error(errMsg);
-          throw new IOException(errMsg);
-        }
-      }
-    }
-    long procId = this.procedureExecutor.submitProcedure(
-      new RestoreTablesProcedure(procedureExecutor.getEnvironment(), backupRootDir, backupId,
-        sTableList, tTableList, isOverwrite), nonceGroup, nonce);
-    return procId;
-  }
 
   /**
    * Returns the list of table descriptors that match the specified request

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 8025a67..2c577c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1053,48 +1053,6 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
-  public MasterProtos.BackupTablesResponse backupTables(
-      RpcController controller,
-      MasterProtos.BackupTablesRequest request)  throws ServiceException {
-    try {
-      BackupTablesResponse.Builder response = BackupTablesResponse.newBuilder();
-      List<TableName> tablesList = new ArrayList<>(request.getTablesList().size());
-      for (HBaseProtos.TableName table : request.getTablesList()) {
-        tablesList.add(ProtobufUtil.toTableName(table));
-      }
-      Pair<Long, String> pair = master.backupTables(
-        BackupType.valueOf(request.getType().name()), tablesList, request.getTargetRootDir(),
-        (int)request.getWorkers(), request.getBandwidth(), request.getBackupSetName(),
-        request.getNonceGroup(), request.getNonce());
-      return response.setProcId(pair.getFirst()).setBackupId(pair.getSecond()).build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public MasterProtos.RestoreTablesResponse restoreTables(
-      RpcController controller,
-      MasterProtos.RestoreTablesRequest request)  throws ServiceException {
-    try {
-      RestoreTablesResponse.Builder response = RestoreTablesResponse.newBuilder();
-      List<TableName> tablesList = new ArrayList<>(request.getTablesList().size());
-      for (HBaseProtos.TableName table : request.getTablesList()) {
-        tablesList.add(ProtobufUtil.toTableName(table));
-      }
-      List<TableName> targetTablesList = new ArrayList<>(request.getTargetTablesList().size());
-      for (HBaseProtos.TableName table : request.getTargetTablesList()) {
-        targetTablesList.add(ProtobufUtil.toTableName(table));
-      }
-      long procId = master.restoreTables(request.getBackupRootDir(), request.getBackupId(),
-        request.getDependencyCheckOnly(), tablesList, targetTablesList, request.getOverwrite(),
-        request.getNonceGroup(), request.getNonce());
-      return response.setProcId(procId).build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-  @Override
   public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
       ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index d147ce2..1d7ef4e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -181,36 +181,6 @@ public interface MasterServices extends Server {
       final long nonce)
       throws IOException;
 
-  /**
-   * Full backup given list of tables
-   * @param type whether the backup is full or incremental
-   * @param tableList list of tables to backup
-   * @param targetRootDir root dir for saving the backup
-   * @param workers number of paralle workers. -1 - system defined
-   * @param bandwidth bandwidth per worker in MB per sec. -1 - unlimited
-   * @param setName - backup set name
-   * @param nonceGroup nonce group
-   * @param nonce nonce
-   * @return pair of procedure Id and backupId
-   * @throws IOException
-   */
-  public Pair<Long, String> backupTables(
-      final BackupType type,
-      List<TableName> tableList,
-      final String targetRootDir,
-      final int workers,
-      final long bandwidth,
-      final String setName,
-      final long nonceGroup,
-      final long nonce) throws IOException;
-
-  /*
-   * Restore table set
-   */
-  public long restoreTables(String backupRootDir,
-      String backupId, boolean check, List<TableName> sTableList,
-      List<TableName> tTableList, boolean isOverwrite, long nonceGroup, long nonce)
-          throws IOException;
 
   /**
    * Enable an existing table

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index ec53a64..2da7871 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -21,7 +21,9 @@ package org.apache.hadoop.hbase.backup;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
+import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,8 +41,8 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Durability;
@@ -116,6 +118,15 @@ public class TestBackupBase {
     LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
     waitForSystemTable();
     createTables();
+    populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1);
+  }
+  
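+  // Copy every entry of the master's live configuration into the client conf used by the tests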
+  private static void populateFromMasterConfig(Configuration masterConf, Configuration conf) {
+    Iterator<Entry<String, String>> it = masterConf.iterator();
+    while (it.hasNext()) {
+      Entry<String, String> e = it.next();
+      conf.set(e.getKey(), e.getValue());
+    }
   }
   
   public static void waitForSystemTable() throws Exception
@@ -157,23 +168,18 @@ public class TestBackupBase {
   protected String backupTables(BackupType type, List<TableName> tables, String path)
       throws IOException {
     Connection conn = null;
-    HBaseAdmin admin = null;
     BackupAdmin badmin = null;
     String backupId;
     try {
       conn = ConnectionFactory.createConnection(conf1);
-      admin = (HBaseAdmin) conn.getAdmin();
-      BackupRequest request = new BackupRequest();
+      badmin = new HBaseBackupAdmin(conn);
+      BackupRequest request = new BackupRequest();      
       request.setBackupType(type).setTableList(tables).setTargetRootDir(path);
-      badmin = admin.getBackupAdmin();
       backupId = badmin.backupTables(request);
     } finally {
       if(badmin != null){
         badmin.close();
       }
-      if (admin != null) {
-        admin.close();
-      }
       if (conn != null) {
         conn.close();
       }
@@ -264,7 +270,7 @@ public class TestBackupBase {
   }
 
   protected BackupAdmin getBackupAdmin() throws IOException {
-    return TEST_UTIL.getAdmin().getBackupAdmin();
+    return new HBaseBackupAdmin(TEST_UTIL.getConnection());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
index 62c47d6..280314b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
@@ -18,11 +18,11 @@
 
 package org.apache.hadoop.hbase.backup;
 
+import java.io.IOException;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.Test;
@@ -63,7 +63,7 @@ public class TestBackupBoundaryTests extends TestBackupBase {
    * Verify that full backup fails on a single table that does not exist.
    * @throws Exception
    */
-  @Test(expected = DoNotRetryIOException.class)
+  @Test(expected = IOException.class)
   public void testFullBackupSingleDNE() throws Exception {
 
     LOG.info("test full backup fails on a single table that does not exist");
@@ -75,7 +75,7 @@ public class TestBackupBoundaryTests extends TestBackupBase {
    * Verify that full backup fails on multiple tables that do not exist.
    * @throws Exception
    */
-  @Test(expected = DoNotRetryIOException.class)
+  @Test(expected = IOException.class)
   public void testFullBackupMultipleDNE() throws Exception {
 
     LOG.info("test full backup fails on multiple tables that do not exist");
@@ -87,7 +87,7 @@ public class TestBackupBoundaryTests extends TestBackupBase {
    * Verify that full backup fails on tableset containing real and fake tables.
    * @throws Exception
    */
-  @Test(expected = DoNotRetryIOException.class)
+  @Test(expected = IOException.class)
   public void testFullBackupMixExistAndDNE() throws Exception {
     LOG.info("create full backup fails on tableset containing real and fake table");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
index abdf3c7..09c3833 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
@@ -20,7 +20,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
-import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Table;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
index c39241e..1caba22 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
@@ -28,6 +28,7 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -59,9 +60,10 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     HBaseAdmin admin = null;
     Connection conn = ConnectionFactory.createConnection(conf1);
     admin = (HBaseAdmin) conn.getAdmin();
+    BackupAdmin client = new HBaseBackupAdmin(conn);
     BackupRequest request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdFull = admin.getBackupAdmin().backupTables(request);
+    String backupIdFull = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdFull));
     // #2 - insert some data to table table1
     HTable t1 = (HTable) conn.getTable(table1);
@@ -78,7 +80,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdInc1 = admin.getBackupAdmin().backupTables(request);
+    String backupIdInc1 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc1));
     // #4 - insert some data to table table2
     HTable t2 = (HTable) conn.getTable(table2);
@@ -93,7 +95,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdInc2 = admin.getBackupAdmin().backupTables(request);
+    String backupIdInc2 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc2));
     // #6 - insert some data to table table1
     t1 = (HTable) conn.getTable(table1);
@@ -107,7 +109,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdInc3 = admin.getBackupAdmin().backupTables(request);
+    String backupIdInc3 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc3));
     // #8 - insert some data to table table2
     t2 = (HTable) conn.getTable(table2);
@@ -121,25 +123,25 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdInc4 = admin.getBackupAdmin().backupTables(request);
+    String backupIdInc4 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc4));
     // #10 full backup for table3
     tables = Lists.newArrayList(table3);
     request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdFull2 = admin.getBackupAdmin().backupTables(request);
+    String backupIdFull2 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdFull2));
     // #11 - incremental backup for table3
     tables = Lists.newArrayList(table3);
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdInc5 = admin.getBackupAdmin().backupTables(request);
+    String backupIdInc5 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc5));
     LOG.error("Delete backupIdInc2");
-    admin.getBackupAdmin().deleteBackups(new String[] { backupIdInc2 });
+    client.deleteBackups(new String[] { backupIdInc2 });
     LOG.error("Delete backupIdInc2 done");
-    List<BackupInfo> list = admin.getBackupAdmin().getHistory(100);
+    List<BackupInfo> list = client.getHistory(100);
     // First check number of backup images before and after
     assertEquals(4, list.size());
     // then verify that no backupIdInc2,3,4

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
index 639aea4..a01801d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -21,7 +21,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
-import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.util.ToolRunner;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index fe00ac5..705a066 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -22,16 +22,19 @@ import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Iterator;
 import java.util.List;
+import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
-import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -68,7 +71,7 @@ public class TestIncrementalBackup extends TestBackupBase {
   //implement all test cases in 1 test since incremental backup/restore has dependencies
   @Test
   public void TestIncBackupRestore() throws Exception {
-    
+
     int ADD_ROWS = 99;
     // #1 - create full backup for all tables
     LOG.info("create full backup image for all tables");
@@ -84,10 +87,11 @@ public class TestIncrementalBackup extends TestBackupBase {
 
     HBaseAdmin admin = null;
     admin = (HBaseAdmin) conn.getAdmin();
+    HBaseBackupAdmin client = new HBaseBackupAdmin(conn);
 
     BackupRequest request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdFull = admin.getBackupAdmin().backupTables(request);
+    String backupIdFull = client.backupTables(request);
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -95,12 +99,12 @@ public class TestIncrementalBackup extends TestBackupBase {
     HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
     LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
 
-    Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(
-        NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3));
+    Assert.assertThat(TEST_UTIL.countRows(t1),
+      CoreMatchers.equalTo(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3));
     t1.close();
     LOG.debug("written " + ADD_ROWS + " rows to " + table1);
 
-    HTable t2 =  (HTable) conn.getTable(table2);
+    HTable t2 = (HTable) conn.getTable(table2);
     Put p2;
     for (int i = 0; i < 5; i++) {
       p2 = new Put(Bytes.toBytes("row-t2" + i));
@@ -111,29 +115,29 @@ public class TestIncrementalBackup extends TestBackupBase {
     Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
     t2.close();
     LOG.debug("written " + 5 + " rows to " + table2);
-    // split table1 
+    // split table1
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     List<HRegion> regions = cluster.getRegions(table1);
 
-    byte[] name = regions.get(0).getRegionInfo().getRegionName();    
+    byte[] name = regions.get(0).getRegionInfo().getRegionName();
     long startSplitTime = EnvironmentEdgeManager.currentTime();
     admin.splitRegion(name);
 
     while (!admin.isTableAvailable(table1)) {
       Thread.sleep(100);
     }
-    
+
     long endSplitTime = EnvironmentEdgeManager.currentTime();
 
-    // split finished 
-    LOG.debug("split finished in ="+ (endSplitTime - startSplitTime));
-    
+    // split finished
+    LOG.debug("split finished in =" + (endSplitTime - startSplitTime));
+
     // #3 - incremental backup for multiple tables
     tables = Lists.newArrayList(table1, table2);
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
-    .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = admin.getBackupAdmin().backupTables(request);
+        .setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdIncMultiple = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
     // add column family f2 to table1
@@ -150,22 +154,18 @@ public class TestIncrementalBackup extends TestBackupBase {
     // #3 - incremental backup for multiple tables
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
-    .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = admin.getBackupAdmin().backupTables(request);
+        .setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdIncMultiple2 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdIncMultiple2));
 
     // #4 - restore full backup for all tables, without overwrite
-    TableName[] tablesRestoreFull =
-        new TableName[] { table1, table2 };
+    TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
 
-    TableName[] tablesMapFull =
-        new TableName[] { table1_restore, table2_restore };
+    TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
 
-    BackupAdmin client = getBackupAdmin();
     LOG.debug("Restoring full " + backupIdFull);
     client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
-      tablesRestoreFull,
-      tablesMapFull, false));
+      tablesRestoreFull, tablesMapFull, false));
 
     // #5.1 - check tables for full restore
     HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
@@ -176,8 +176,8 @@ public class TestIncrementalBackup extends TestBackupBase {
 
     // #5.2 - checking row count of tables for full restore
     HTable hTable = (HTable) conn.getTable(table1_restore);
-    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH +
-        NB_ROWS_FAM3));
+    Assert.assertThat(TEST_UTIL.countRows(hTable),
+      CoreMatchers.equalTo(NB_ROWS_IN_BATCH + NB_ROWS_FAM3));
     hTable.close();
 
     hTable = (HTable) conn.getTable(table2_restore);
@@ -185,18 +185,16 @@ public class TestIncrementalBackup extends TestBackupBase {
     hTable.close();
 
     // #6 - restore incremental backup for multiple tables, with overwrite
-    TableName[] tablesRestoreIncMultiple =
-        new TableName[] { table1, table2 };
-    TableName[] tablesMapIncMultiple =
-        new TableName[] { table1_restore, table2_restore };
+    TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
+    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
     client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2,
-        false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+      false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
 
     hTable = (HTable) conn.getTable(table1_restore);
     LOG.debug("After incremental restore: " + hTable.getTableDescriptor());
     LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
     Assert.assertThat(TEST_UTIL.countRows(hTable, famName),
-        CoreMatchers.equalTo(NB_ROWS_IN_BATCH + ADD_ROWS));
+      CoreMatchers.equalTo(NB_ROWS_IN_BATCH + ADD_ROWS));
     LOG.debug("f2 has " + TEST_UTIL.countRows(hTable, fam2Name) + " rows");
     Assert.assertThat(TEST_UTIL.countRows(hTable, fam2Name), CoreMatchers.equalTo(NB_ROWS_FAM2));
     hTable.close();
@@ -207,6 +205,7 @@ public class TestIncrementalBackup extends TestBackupBase {
 
     admin.close();
     conn.close();
+
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
index a7c0713..0a73888 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
@@ -25,8 +25,8 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
-import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -63,10 +63,11 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     HBaseAdmin admin = null;
     Connection conn = ConnectionFactory.createConnection(conf1);
     admin = (HBaseAdmin) conn.getAdmin();
+    HBaseBackupAdmin client = new HBaseBackupAdmin(conn);
 
     BackupRequest request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdFull = admin.getBackupAdmin().backupTables(request);
+    String backupIdFull = client.backupTables(request);
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -91,7 +92,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
     .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = admin.getBackupAdmin().backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
     // #4 - restore full backup for all tables, without overwrite
@@ -101,7 +102,6 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     TableName[] tablesMapFull =
         new TableName[] { table1_restore, table2_restore };
 
-    BackupAdmin client = getBackupAdmin();
     client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
       tablesRestoreFull,
       tablesMapFull, false));

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
index 4f4f7ad..42f0ee7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
-import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b14e2ab1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 2a8e3c9..c88f60d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -431,23 +431,6 @@ public class TestCatalogJanitor {
       return null;  //To change body of implemented methods use File | Settings | File Templates.
     }
 
-    @Override
-    public Pair<Long, String> backupTables(
-        final BackupType type,
-        final List<TableName> tableList,
-        final String targetRootDir, final int workers,
-        final long bandwidth, final String setName,
-        final long nonceGroup, final long nonce) throws IOException {
-      return null;
-    }
-
-    @Override
-    public long restoreTables(String backupRootDir,
-        String backupId, boolean check, List<TableName> sTableList,
-        List<TableName> tTableList, boolean isOverwrite, long nonceGroup, long nonce)
-            throws IOException {
-      return -1;
-    }
 
     @Override
     public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
