Repository: hbase
Updated Branches:
  refs/heads/HBASE-14123 [created] a4e03f60d


http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
new file mode 100644
index 0000000..299cd56
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestRemoteBackup extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestRemoteBackup.class);
+
+  /**
+   * Verify that a remote full backup of a single table with data is created correctly.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupRemote() throws Exception {
+    LOG.info("test remote full backup on a single table");
+    final CountDownLatch latch = new CountDownLatch(1);
+    final int NB_ROWS_IN_FAM3 = 6;
+    final byte[] fam3Name = Bytes.toBytes("f3");
+    final byte[] fam2Name = Bytes.toBytes("f2");
+    final Connection conn = ConnectionFactory.createConnection(conf1);
+    Thread t = new Thread() {
+      @Override
+      public void run() {
+        try {
+          latch.await();
+        } catch (InterruptedException ie) {
+        }
+        try {
+          HTable t1 = (HTable) conn.getTable(table1);
+          Put p1;
+          for (int i = 0; i < NB_ROWS_IN_FAM3; i++) {
+            p1 = new Put(Bytes.toBytes("row-t1" + i));
+            p1.addColumn(fam3Name, qualName, Bytes.toBytes("val" + i));
+            t1.put(p1);
+          }
+          LOG.debug("Wrote " + NB_ROWS_IN_FAM3 + " rows into family3");
+          t1.close();
+        } catch (IOException ioe) {
+          throw new RuntimeException(ioe);
+        }
+      }
+    };
+    t.start();
+
+    table1Desc.addFamily(new HColumnDescriptor(fam3Name));
+    // family 2 is MOB enabled
+    HColumnDescriptor hcd = new HColumnDescriptor(fam2Name);
+    hcd.setMobEnabled(true);
+    hcd.setMobThreshold(0L);
+    table1Desc.addFamily(hcd);
+    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
+
+    SnapshotTestingUtils.loadData(TEST_UTIL, table1, 50, fam2Name);
+    HTable t1 = (HTable) conn.getTable(table1);
+    int rows0 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
+
+    latch.countDown();
+    String backupId =
+        backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+
+    LOG.info("backup complete " + backupId);
+    Assert.assertEquals(TEST_UTIL.countRows(t1, famName), NB_ROWS_IN_BATCH);
+
+    t.join();
+    Assert.assertEquals(TEST_UTIL.countRows(t1, fam3Name), NB_ROWS_IN_FAM3);
+    t1.close();
+
+    TableName[] tablesRestoreFull = new TableName[] { table1 };
+
+    TableName[] tablesMapFull = new TableName[] { table1_restore };
+
+    BackupAdmin client = getBackupAdmin();
+    client.restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false,
+      tablesRestoreFull, tablesMapFull, false));
+
+    // check tables for full restore
+    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hAdmin.tableExists(table1_restore));
+
+    // #5.2 - checking row count of tables for full restore
+    HTable hTable = (HTable) conn.getTable(table1_restore);
+    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH);
+    int cnt3 = TEST_UTIL.countRows(hTable, fam3Name);
+    Assert.assertTrue(cnt3 >= 0 && cnt3 <= NB_ROWS_IN_FAM3);
+
+    int rows1 = MobSnapshotTestingUtils.countMobRows(hTable, fam2Name);
+    Assert.assertEquals(rows0, rows1);
+    hTable.close();
+
+    hAdmin.close();
+  }
+}
\ No newline at end of file
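
For context, a minimal standalone sketch of the CountDownLatch hand-off that
testFullBackupRemote uses above, assuming only the JDK: the writer thread parks
on the latch, and the main thread releases it immediately before starting the
backup, so the puts race with the backup instead of preceding it.

    import java.util.concurrent.CountDownLatch;

    public class LatchHandOffSketch {
      public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        Thread writer = new Thread(() -> {
          try {
            latch.await();                      // park until released below
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt(); // preserve the interrupt and bail out
            return;
          }
          System.out.println("writing rows concurrently with the backup");
        });
        writer.start();

        // ... create column families and load the initial data here ...

        latch.countDown();   // release the writer just before the backup starts
        System.out.println("the full backup would run here");
        writer.join();       // wait for the writer before asserting row counts
      }
    }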

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
new file mode 100644
index 0000000..c7ed954
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRemoteRestore extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestRemoteRestore.class);
+
+  /**
+   * Verify that a remote restore on a single table is successful.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreRemote() throws Exception {
+
+    LOG.info("test remote full backup on a single table");
+    String backupId =
+        backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
+    LOG.info("backup complete");
+    TableName[] tableset = new TableName[] { table1 };
+    TableName[] tablemap = new TableName[] { table1_restore };
+    getBackupAdmin().restore(
+      BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tableset,
+        tablemap, false));
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table1_restore));
+    TEST_UTIL.deleteTable(table1_restore);
+    hba.close();
+  }
+
+}
\ No newline at end of file
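
Every restore in this patch follows the same call shape; a condensed sketch of
that flow is below. The meaning of the two boolean positions (check, overwrite)
is inferred from these call sites, and BackupAdmin.restore is assumed to throw
IOException, which the tests' broader throws Exception subsumes.

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.BackupAdmin;
    import org.apache.hadoop.hbase.backup.util.BackupUtils;

    public final class RestoreFlowSketch {
      // Restore one table from a full backup image under a new name.
      static void restoreFullBackup(BackupAdmin admin, String backupRootDir,
          String backupId, TableName source, TableName target) throws IOException {
        TableName[] from = new TableName[] { source }; // tables captured in the backup
        TableName[] to = new TableName[] { target };   // names to restore them under
        admin.restore(BackupUtils.createRestoreRequest(
            backupRootDir, backupId, false /* check */, from, to, false /* overwrite */));
      }
    }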

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
new file mode 100644
index 0000000..c61b018
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRestoreBoundaryTests extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestRestoreBoundaryTests.class);
+
+  /**
+   * Verify that a single empty table is restored to a new table
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreSingleEmpty() throws Exception {
+    LOG.info("test full restore on a single table empty table");
+    String backupId = fullTableBackup(toList(table1.getNameAsString()));
+    LOG.info("backup complete");
+    TableName[] tableset = new TableName[] { table1 };
+    TableName[] tablemap = new TableName[] { table1_restore };
+    getBackupAdmin().restore(
+      BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap,
+        false));
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table1_restore));
+    TEST_UTIL.deleteTable(table1_restore);
+  }
+
+  /**
+   * Verify that multiple tables are restored to new tables.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreMultipleEmpty() throws Exception {
+    LOG.info("create full backup image on multiple tables");
+
+    List<TableName> tables = toList(table2.getNameAsString(), table3.getNameAsString());
+    String backupId = fullTableBackup(tables);
+    TableName[] restore_tableset = new TableName[] { table2, table3 };
+    TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
+    getBackupAdmin().restore(
+      BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset,
+        tablemap, false));
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table2_restore));
+    assertTrue(hba.tableExists(table3_restore));
+    TEST_UTIL.deleteTable(table2_restore);
+    TEST_UTIL.deleteTable(table3_restore);
+    hba.close();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
new file mode 100644
index 0000000..6212ff2
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestSystemTableSnapshot extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestSystemTableSnapshot.class);
+
+  /**
+   * Verify that the backup system table can be snapshotted and restored in place.
+   * @throws Exception
+   */
+  //@Test - Disabled until we get resolution on system table snapshots
+
+  public void _testBackupRestoreSystemTable() throws Exception {
+
+    LOG.info("test snapshot system table");
+
+    TableName backupSystem = BackupSystemTable.getTableName(conf1);
+
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    String snapshotName = "sysTable";
+    hba.snapshot(snapshotName, backupSystem);
+
+    hba.disableTable(backupSystem);
+    hba.restoreSnapshot(snapshotName);
+    hba.enableTable(backupSystem);
+    hba.close();
+  }
+
+}
\ No newline at end of file
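
The disabled test above exercises the standard snapshot/restore cycle; here is
a minimal sketch of that cycle against the stock HBase Admin API, since
restoreSnapshot only operates on a disabled table.

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class SnapshotCycleSketch {
      // Take a snapshot of a table, then roll the table back to it.
      static void snapshotAndRestore(Admin admin, TableName table, String snapshotName)
          throws IOException {
        admin.snapshot(snapshotName, table); // point-in-time image of the table
        admin.disableTable(table);           // restoreSnapshot requires a disabled table
        admin.restoreSnapshot(snapshotName); // roll back to the snapshot
        admin.enableTable(table);            // bring the table back online
      }
    }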

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
new file mode 100644
index 0000000..229597b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.master;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.TestBackupBase;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupLogCleaner extends TestBackupBase {
+  private static final Log LOG = LogFactory.getLog(TestBackupLogCleaner.class);
+
+  // Implements all test cases in one test, since the incremental backup
+  // cases depend on the preceding full backup
+  @Test
+  public void testBackupLogCleaner() throws Exception {
+
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+
+    List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
+
+    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
+      // Verify that we have no backup sessions yet
+      assertFalse(systemTable.hasBackupSessions());
+
+      List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      List<String> swalFiles = convert(walFiles);
+      BackupLogCleaner cleaner = new BackupLogCleaner();
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+      cleaner.init(null);
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+
+      Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
+      int size = Iterables.size(deletable);
+
+      // We can delete all files because no backup sessions have been recorded yet
+      assertTrue(size == walFiles.size());
+
+      systemTable.addWALFiles(swalFiles, "backup", "root");
+      String backupIdFull = fullTableBackup(tableSetFullList);
+      assertTrue(checkSucceeded(backupIdFull));
+      // Check one more time
+      deletable = cleaner.getDeletableFiles(walFiles);
+      // We can delete WAL files because they were saved into the backup system table
+      size = Iterables.size(deletable);
+      assertTrue(size == walFiles.size());
+
+      List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      LOG.debug("WAL list after full backup");
+      convert(newWalFiles);
+
+      // The new list of WAL files is longer than the previous one,
+      // because a new WAL per RS has been opened after the full backup
+      assertTrue(walFiles.size() < newWalFiles.size());
+      Connection conn = ConnectionFactory.createConnection(conf1);
+      // #2 - insert some data to table
+      HTable t1 = (HTable) conn.getTable(table1);
+      Put p1;
+      for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+        p1 = new Put(Bytes.toBytes("row-t1" + i));
+        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t1.put(p1);
+      }
+
+      t1.close();
+
+      HTable t2 = (HTable) conn.getTable(table2);
+      Put p2;
+      for (int i = 0; i < 5; i++) {
+        p2 = new Put(Bytes.toBytes("row-t2" + i));
+        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t2.put(p2);
+      }
+
+      t2.close();
+
+      // #3 - incremental backup for multiple tables
+
+      List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
+      String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList,
+        BACKUP_ROOT_DIR);
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+      deletable = cleaner.getDeletableFiles(newWalFiles);
+
+      assertTrue(Iterables.size(deletable) == newWalFiles.size());
+
+      conn.close();
+    }
+  }
+
+  private List<String> convert(List<FileStatus> walFiles) {
+    List<String> result = new ArrayList<String>();
+    for (FileStatus fs : walFiles) {
+      LOG.debug("+++WAL: " + fs.getPath().toString());
+      result.add(fs.getPath().toString());
+    }
+    return result;
+  }
+
+  private List<FileStatus> getListOfWALFiles(Configuration c) throws IOException {
+    Path logRoot = new Path(FSUtils.getRootDir(c), HConstants.HREGION_LOGDIR_NAME);
+    FileSystem fs = FileSystem.get(c);
+    RemoteIterator<LocatedFileStatus> it = fs.listFiles(logRoot, true);
+    List<FileStatus> logFiles = new ArrayList<FileStatus>();
+    while (it.hasNext()) {
+      LocatedFileStatus lfs = it.next();
+      if (lfs.isFile() && !AbstractFSWALProvider.isMetaFile(lfs.getPath())) {
+        logFiles.add(lfs);
+        LOG.info(lfs);
+      }
+    }
+    return logFiles;
+  }
+
+}
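
The lifecycle the test drives by hand (setConf, then init, then repeated
getDeletableFiles calls) is the log-cleaner-delegate contract; a short sketch
of that usage, mirroring the calls above:

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;

    public final class LogCleanerSketch {
      // Ask the backup-aware cleaner which WAL files may be deleted.
      static Iterable<FileStatus> deletableWals(Configuration conf, List<FileStatus> wals) {
        BackupLogCleaner cleaner = new BackupLogCleaner();
        cleaner.setConf(conf); // hand the cleaner the cluster configuration
        cleaner.init(null);    // no extra parameters, as in the test
        // WALs still needed by recorded backup sessions are filtered out of the result
        return cleaner.getDeletableFiles(wals);
      }
    }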

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java
index e71318b..296b38f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java
@@ -64,7 +64,7 @@ public class SimpleMasterProcedureManager extends MasterProcedureManager {
     // setup the default procedure coordinator
     String name = master.getServerName().toString();
     ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, 1);
-    ProcedureCoordinatorRpcs comms = new ZKProcedureCoordinatorRpcs(
+    ProcedureCoordinatorRpcs comms = new ZKProcedureCoordinator(
         master.getZooKeeper(), getProcedureSignature(), name);
 
     this.coordinator = new ProcedureCoordinator(comms, tpool);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java
index 9a77ce5..f20c8a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java
@@ -127,7 +127,7 @@ public class TestZKProcedure {
     String opDescription = "coordination test - " + members.length + " cohort members";
 
     // start running the controller
-    ZKProcedureCoordinatorRpcs coordinatorComms = new ZKProcedureCoordinatorRpcs(
+    ZKProcedureCoordinator coordinatorComms = new ZKProcedureCoordinator(
         coordZkw, opDescription, COORDINATOR_NODE_NAME);
     ThreadPoolExecutor pool = ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME, POOL_SIZE, KEEP_ALIVE);
     ProcedureCoordinator coordinator = new ProcedureCoordinator(coordinatorComms, pool) {
@@ -208,7 +208,7 @@ public class TestZKProcedure {
 
     // start running the coordinator and its controller
     ZooKeeperWatcher coordinatorWatcher = newZooKeeperWatcher();
-    ZKProcedureCoordinatorRpcs coordinatorController = new ZKProcedureCoordinatorRpcs(
+    ZKProcedureCoordinator coordinatorController = new ZKProcedureCoordinator(
         coordinatorWatcher, opDescription, COORDINATOR_NODE_NAME);
     ThreadPoolExecutor pool = ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME, POOL_SIZE, KEEP_ALIVE);
     ProcedureCoordinator coordinator = spy(new ProcedureCoordinator(coordinatorController, pool));
@@ -393,7 +393,7 @@ public class TestZKProcedure {
 
   private void closeAll(
       ProcedureCoordinator coordinator,
-      ZKProcedureCoordinatorRpcs coordinatorController,
+      ZKProcedureCoordinator coordinatorController,
       List<Pair<ProcedureMember, ZKProcedureMemberRpcs>> cohort)
       throws IOException {
     // make sure we close all the resources

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
index d864db2..e7e2b23 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hbase.procedure;
 
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
@@ -31,11 +31,11 @@ import java.util.concurrent.CountDownLatch;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -188,9 +188,9 @@ public class TestZKProcedureControllers {
 
     ProcedureMember member = Mockito.mock(ProcedureMember.class);
 
-    Pair<ZKProcedureCoordinatorRpcs, List<ZKProcedureMemberRpcs>> pair = controllers
+    Pair<ZKProcedureCoordinator, List<ZKProcedureMemberRpcs>> pair = controllers
         .start(watcher, operationName, coordinator, CONTROLLER_NODE_NAME, member, expected);
-    ZKProcedureCoordinatorRpcs controller = pair.getFirst();
+    ZKProcedureCoordinator controller = pair.getFirst();
     List<ZKProcedureMemberRpcs> cohortControllers = pair.getSecond();
     // start the operation
     Procedure p = Mockito.mock(Procedure.class);
@@ -266,9 +266,9 @@ public class TestZKProcedureControllers {
     Procedure p = Mockito.mock(Procedure.class);
     Mockito.when(p.getName()).thenReturn(operationName);
 
-    Pair<ZKProcedureCoordinatorRpcs, List<ZKProcedureMemberRpcs>> pair = controllers
+    Pair<ZKProcedureCoordinator, List<ZKProcedureMemberRpcs>> pair = controllers
         .start(watcher, operationName, coordinator, CONTROLLER_NODE_NAME, member, expected);
-    ZKProcedureCoordinatorRpcs controller = pair.getFirst();
+    ZKProcedureCoordinator controller = pair.getFirst();
     List<ZKProcedureMemberRpcs> cohortControllers = pair.getSecond();
 
     // post 1/2 the prepare nodes early
@@ -383,7 +383,7 @@ public class TestZKProcedureControllers {
    * Specify how the controllers that should be started (not spy/mockable) for the test.
    */
   private abstract class StartControllers {
-    public abstract Pair<ZKProcedureCoordinatorRpcs, List<ZKProcedureMemberRpcs>> start(
+    public abstract Pair<ZKProcedureCoordinator, List<ZKProcedureMemberRpcs>> start(
         ZooKeeperWatcher watcher, String operationName,
         ProcedureCoordinator coordinator, String controllerName,
         ProcedureMember member, List<String> cohortNames) throws Exception;
@@ -392,12 +392,12 @@ public class TestZKProcedureControllers {
   private final StartControllers startCoordinatorFirst = new StartControllers() {
 
     @Override
-    public Pair<ZKProcedureCoordinatorRpcs, List<ZKProcedureMemberRpcs>> start(
+    public Pair<ZKProcedureCoordinator, List<ZKProcedureMemberRpcs>> start(
         ZooKeeperWatcher watcher, String operationName,
         ProcedureCoordinator coordinator, String controllerName,
         ProcedureMember member, List<String> expected) throws Exception {
       // start the controller
-      ZKProcedureCoordinatorRpcs controller = new ZKProcedureCoordinatorRpcs(
+      ZKProcedureCoordinator controller = new ZKProcedureCoordinator(
           watcher, operationName, CONTROLLER_NODE_NAME);
       controller.start(coordinator);
 
@@ -420,7 +420,7 @@ public class TestZKProcedureControllers {
   private final StartControllers startCohortFirst = new StartControllers() {
 
     @Override
-    public Pair<ZKProcedureCoordinatorRpcs, List<ZKProcedureMemberRpcs>> start(
+    public Pair<ZKProcedureCoordinator, List<ZKProcedureMemberRpcs>> start(
         ZooKeeperWatcher watcher, String operationName,
         ProcedureCoordinator coordinator, String controllerName,
         ProcedureMember member, List<String> expected) throws Exception {
@@ -434,7 +434,7 @@ public class TestZKProcedureControllers {
       }
 
       // start the controller
-      ZKProcedureCoordinatorRpcs controller = new ZKProcedureCoordinatorRpcs(
+      ZKProcedureCoordinator controller = new ZKProcedureCoordinator(
           watcher, operationName, CONTROLLER_NODE_NAME);
       controller.start(coordinator);
 
