http://git-wip-us.apache.org/repos/asf/hbase/blob/a98f5295/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
index a381cb9..05de958 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -36,8 +36,10 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
@@ -123,60 +125,60 @@ public class TestMasterOperationsForRegionReplicas {
           assert (state != null);
         }
       }
-      // TODO: HBASE-10351 should uncomment the following tests (since the tests assume region placements are handled)
-//      List<Result> metaRows = MetaReader.fullScan(ct);
-//      int numRows = 0;
-//      for (Result result : metaRows) {
-//        RegionLocations locations = MetaReader.getRegionLocations(result);
-//        HRegionInfo hri = locations.getRegionLocation().getRegionInfo();
-//        if (!hri.getTable().equals(table)) continue;
-//        numRows += 1;
-//        HRegionLocation[] servers = locations.getRegionLocations();
-//        // have two locations for the replicas of a region, and the locations should be different
-//        assert(servers.length == 2);
-//        assert(!servers[0].equals(servers[1]));
-//      }
-//      assert(numRows == numRegions);
-//
-//      // The same verification of the meta as above but with the SnapshotOfRegionAssignmentFromMeta
-//      // class
-//      validateFromSnapshotFromMeta(table, numRegions, numReplica, ct);
-//
-//      // Now kill the master, restart it and see if the assignments are kept
-//      ServerName master = TEST_UTIL.getHBaseClusterInterface().getClusterStatus().getMaster();
-//      TEST_UTIL.getHBaseClusterInterface().stopMaster(master);
-//      TEST_UTIL.getHBaseClusterInterface().waitForMasterToStop(master, 30000);
-//      TEST_UTIL.getHBaseClusterInterface().startMaster(master.getHostname());
-//      TEST_UTIL.getHBaseClusterInterface().waitForActiveAndReadyMaster();
-//      for (int i = 0; i < numRegions; i++) {
-//        for (int j = 0; j < numReplica; j++) {
-//          HRegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(hris.get(i), j);
-//          RegionState state = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
-//              .getRegionStates().getRegionState(replica);
-//          assert (state != null);
-//        }
-//      }
-//      validateFromSnapshotFromMeta(table, numRegions, numReplica, ct);
-//
-//      // Now shut the whole cluster down, and verify the assignments are kept so that the
-//      // availability constraints are met.
-//      TEST_UTIL.getConfiguration().setBoolean("hbase.master.startup.retainassign", true);
-//      TEST_UTIL.shutdownMiniHBaseCluster();
-//      TEST_UTIL.startMiniHBaseCluster(1, numSlaves);
-//      TEST_UTIL.waitTableEnabled(table.getName());
-//      ct = new CatalogTracker(TEST_UTIL.getConfiguration());
-//      validateFromSnapshotFromMeta(table, numRegions, numReplica, ct);
-//
-//      // Now shut the whole cluster down, and verify regions are assigned even if there is only
-//      // one server running
-//      TEST_UTIL.shutdownMiniHBaseCluster();
-//      TEST_UTIL.startMiniHBaseCluster(1, 1);
-//      TEST_UTIL.waitTableEnabled(table.getName());
-//      ct = new CatalogTracker(TEST_UTIL.getConfiguration());
-//      validateSingleRegionServerAssignment(ct, numRegions, numReplica);
-//      for (int i = 1; i < numSlaves; i++) { //restore the cluster
-//        TEST_UTIL.getMiniHBaseCluster().startRegionServer();
-//      }
+
+      List<Result> metaRows = MetaReader.fullScan(ct);
+      int numRows = 0;
+      for (Result result : metaRows) {
+        RegionLocations locations = MetaReader.getRegionLocations(result);
+        HRegionInfo hri = locations.getRegionLocation().getRegionInfo();
+        if (!hri.getTable().equals(table)) continue;
+        numRows += 1;
+        HRegionLocation[] servers = locations.getRegionLocations();
+        // have two locations for the replicas of a region, and the locations should be different
+        assert(servers.length == 2);
+        assert(!servers[0].equals(servers[1]));
+      }
+      assert(numRows == numRegions);
+
+      // The same verification of the meta as above but with the SnapshotOfRegionAssignmentFromMeta
+      // class
+      validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica, ct);
+
+      // Now kill the master, restart it and see if the assignments are kept
+      ServerName master = TEST_UTIL.getHBaseClusterInterface().getClusterStatus().getMaster();
+      TEST_UTIL.getHBaseClusterInterface().stopMaster(master);
+      TEST_UTIL.getHBaseClusterInterface().waitForMasterToStop(master, 30000);
+      TEST_UTIL.getHBaseClusterInterface().startMaster(master.getHostname());
+      TEST_UTIL.getHBaseClusterInterface().waitForActiveAndReadyMaster();
+      for (int i = 0; i < numRegions; i++) {
+        for (int j = 0; j < numReplica; j++) {
+          HRegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(hris.get(i), j);
+          RegionState state = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
+              .getRegionStates().getRegionState(replica);
+          assert (state != null);
+        }
+      }
+      validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica, ct);
+
+      // Now shut the whole cluster down, and verify the assignments are kept so that the
+      // availability constraints are met.
+      TEST_UTIL.getConfiguration().setBoolean("hbase.master.startup.retainassign", true);
+      TEST_UTIL.shutdownMiniHBaseCluster();
+      TEST_UTIL.startMiniHBaseCluster(1, numSlaves);
+      TEST_UTIL.waitTableEnabled(table.getName());
+      ct = new CatalogTracker(TEST_UTIL.getConfiguration());
+      validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica, ct);
+
+      // Now shut the whole cluster down, and verify regions are assigned even if there is only
+      // one server running
+      TEST_UTIL.shutdownMiniHBaseCluster();
+      TEST_UTIL.startMiniHBaseCluster(1, 1);
+      TEST_UTIL.waitTableEnabled(table.getName());
+      ct = new CatalogTracker(TEST_UTIL.getConfiguration());
+      validateSingleRegionServerAssignment(ct, numRegions, numReplica);
+      for (int i = 1; i < numSlaves; i++) { //restore the cluster
+        TEST_UTIL.getMiniHBaseCluster().startRegionServer();
+      }
 
       //check on alter table
       admin.disableTable(table);
@@ -288,7 +290,7 @@ public class TestMasterOperationsForRegionReplicas {
     assert(count.get() == numRegions);
   }
 
-  private void validateFromSnapshotFromMeta(TableName table, int numRegions,
+  private void validateFromSnapshotFromMeta(HBaseTestingUtility util, TableName table, int numRegions,
       int numReplica, CatalogTracker ct) throws IOException {
     SnapshotOfRegionAssignmentFromMeta snapshot = new SnapshotOfRegionAssignmentFromMeta(ct);
     snapshot.initialize();
@@ -296,6 +298,9 @@ public class TestMasterOperationsForRegionReplicas {
     assert(regionToServerMap.size() == numRegions * numReplica + 1); //'1' for the namespace
     Map<ServerName, List<HRegionInfo>> serverToRegionMap = snapshot.getRegionServerToRegionMap();
     for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverToRegionMap.entrySet()) {
+      if (entry.getKey().equals(util.getHBaseCluster().getMaster().getServerName())) {
+        continue;
+      }
       List<HRegionInfo> regions = entry.getValue();
       Set<byte[]> setOfStartKeys = new HashSet<byte[]>();
       for (HRegionInfo region : regions) {
@@ -307,7 +312,7 @@ public class TestMasterOperationsForRegionReplicas {
       }
       // the number of startkeys will be equal to the number of regions hosted in each server
       // (each server will be hosting one replica of a region)
-      assertEquals(setOfStartKeys.size() , numRegions);
+      assertEquals(numRegions, setOfStartKeys.size());
     }
   }
 
@@ -316,9 +321,14 @@ public class TestMasterOperationsForRegionReplicas {
     SnapshotOfRegionAssignmentFromMeta snapshot = new SnapshotOfRegionAssignmentFromMeta(ct);
     snapshot.initialize();
     Map<HRegionInfo, ServerName>  regionToServerMap = snapshot.getRegionToRegionServerMap();
-    assert(regionToServerMap.size() == numRegions * numReplica + 1); //'1' for the namespace
+    assertEquals(regionToServerMap.size(), numRegions * numReplica + 1); //'1' for the namespace
     Map<ServerName, List<HRegionInfo>> serverToRegionMap = snapshot.getRegionServerToRegionMap();
-    assert(serverToRegionMap.keySet().size() == 1);
-    assert(serverToRegionMap.values().iterator().next().size() == numRegions * numReplica + 1);
+    assertEquals(serverToRegionMap.keySet().size(), 2); // 1 rs + 1 master
+    for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverToRegionMap.entrySet()) {
+      if (entry.getKey().equals(TEST_UTIL.getHBaseCluster().getMaster().getServerName())) {
+        continue;
+      }
+      assertEquals(entry.getValue().size(), numRegions * numReplica);
+    }
   }
 }

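Both the re-enabled meta verification above and the balancer changes below lean on RegionReplicaUtil to map between a primary region and its replica HRegionInfos. A minimal standalone sketch of that mapping, assuming only the hbase-client classes already imported by the patch (the table name and keys are made up for illustration):

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionReplicaUtil;

    public class ReplicaIdSketch {
      public static void main(String[] args) {
        // primary region (replicaId 0) of a hypothetical table
        HRegionInfo primary = new HRegionInfo(TableName.valueOf("t"),
            "a".getBytes(), "b".getBytes(), false, 100);
        // replica 1 shares the key range and regionId; only the replicaId differs
        HRegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(primary, 1);
        // any replica can be mapped back to its primary ("default" replica)
        HRegionInfo primaryAgain = RegionReplicaUtil.getRegionInfoForDefaultReplica(replica);
        System.out.println(replica.getReplicaId());        // expected: 1
        System.out.println(primaryAgain.equals(primary));  // expected: true
      }
    }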
http://git-wip-us.apache.org/repos/asf/hbase/blob/a98f5295/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java
index a7d678d..7216abd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java
@@ -21,20 +21,26 @@ import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Queue;
 import java.util.Random;
+import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.master.RackManager;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
 
 /**
  * Class used to be the base of unit tests on load balancers. It gives helper
@@ -80,6 +86,50 @@ public class BalancerTestBase {
     }
   }
 
+  /**
+   * Checks whether region replicas are not hosted on the same host.
+   */
+  public void assertRegionReplicaPlacement(Map<ServerName, List<HRegionInfo>> serverMap, RackManager rackManager) {
+    TreeMap<String, Set<HRegionInfo>> regionsPerHost = new TreeMap<String, Set<HRegionInfo>>();
+    TreeMap<String, Set<HRegionInfo>> regionsPerRack = new TreeMap<String, Set<HRegionInfo>>();
+
+    for (Entry<ServerName, List<HRegionInfo>> entry : serverMap.entrySet()) {
+      String hostname = entry.getKey().getHostname();
+      Set<HRegionInfo> infos = regionsPerHost.get(hostname);
+      if (infos == null) {
+        infos = new HashSet<HRegionInfo>();
+        regionsPerHost.put(hostname, infos);
+      }
+
+      for (HRegionInfo info : entry.getValue()) {
+        HRegionInfo primaryInfo = RegionReplicaUtil.getRegionInfoForDefaultReplica(info);
+        if (!infos.add(primaryInfo)) {
+          Assert.fail("Two or more region replicas are hosted on the same host after balance");
+        }
+      }
+    }
+
+    if (rackManager == null) {
+      return;
+    }
+
+    for (Entry<ServerName, List<HRegionInfo>> entry : serverMap.entrySet()) {
+      String rack = rackManager.getRack(entry.getKey());
+      Set<HRegionInfo> infos = regionsPerRack.get(rack);
+      if (infos == null) {
+        infos = new HashSet<HRegionInfo>();
+        regionsPerRack.put(rack, infos);
+      }
+
+      for (HRegionInfo info : entry.getValue()) {
+        HRegionInfo primaryInfo = RegionReplicaUtil.getRegionInfoForDefaultReplica(info);
+        if (!infos.add(primaryInfo)) {
+          Assert.fail("Two or more region replicas are hosted on the same rack after balance");
+        }
+      }
+    }
+  }
+
   protected String printStats(List<ServerAndLoad> servers) {
     int numServers = servers.size();
     int totalRegions = 0;
@@ -159,18 +209,18 @@ public class BalancerTestBase {
     map.put(sn, sal);
   }
 
-  protected Map<ServerName, List<HRegionInfo>> mockClusterServers(int[] mockCluster) {
+  protected TreeMap<ServerName, List<HRegionInfo>> mockClusterServers(int[] mockCluster) {
     return mockClusterServers(mockCluster, -1);
   }
 
   protected BaseLoadBalancer.Cluster mockCluster(int[] mockCluster) {
     return new BaseLoadBalancer.Cluster(null,
-      mockClusterServers(mockCluster, -1), null, null, null, null);
+      mockClusterServers(mockCluster, -1), null, null, null, null, null);
   }
 
-  protected Map<ServerName, List<HRegionInfo>> mockClusterServers(int[] mockCluster, int numTables) {
+  protected TreeMap<ServerName, List<HRegionInfo>> mockClusterServers(int[] mockCluster, int numTables) {
     int numServers = mockCluster.length;
-    Map<ServerName, List<HRegionInfo>> servers = new TreeMap<ServerName, List<HRegionInfo>>();
+    TreeMap<ServerName, List<HRegionInfo>> servers = new TreeMap<ServerName, List<HRegionInfo>>();
     for (int i = 0; i < numServers; i++) {
       int numRegions = mockCluster[i];
       ServerAndLoad sal = randomServer(0);
@@ -218,7 +268,7 @@ public class BalancerTestBase {
       ServerName sn = this.serverQueue.poll();
       return new ServerAndLoad(sn, numRegionsPerServer);
     }
-    String host = "srv" + rand.nextInt(100000);
+    String host = "srv" + rand.nextInt(Integer.MAX_VALUE);
     int port = rand.nextInt(60000);
     long startCode = rand.nextLong();
     ServerName sn = ServerName.valueOf(host, port, startCode);

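A usage sketch for the assertRegionReplicaPlacement helper added above, with a hand-built server map and a Mockito-stubbed RackManager; the hostnames, rack labels and the table are illustrative, not taken from the patch:

    // inside a JUnit test that extends BalancerTestBase
    HRegionInfo primary = new HRegionInfo(TableName.valueOf("t"),
        "a".getBytes(), "b".getBytes(), false, 100);
    HRegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(primary, 1);

    ServerName s1 = ServerName.valueOf("host1", 1234, 1L);
    ServerName s2 = ServerName.valueOf("host2", 1234, 1L);
    Map<ServerName, List<HRegionInfo>> serverMap = new TreeMap<ServerName, List<HRegionInfo>>();
    serverMap.put(s1, new ArrayList<HRegionInfo>(Arrays.asList(primary)));
    serverMap.put(s2, new ArrayList<HRegionInfo>(Arrays.asList(replica)));

    RackManager rackManager = Mockito.mock(RackManager.class);
    Mockito.when(rackManager.getRack(s1)).thenReturn("rack1");
    Mockito.when(rackManager.getRack(s2)).thenReturn("rack2");

    // passes: the two replicas of the region sit on different hosts and racks;
    // putting both on s1 (or on two servers of the same rack) would fail the assert
    assertRegionReplicaPlacement(serverMap, rackManager);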
http://git-wip-us.apache.org/repos/asf/hbase/blob/a98f5295/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
index 7bd0b71..0507e26 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
@@ -26,6 +26,7 @@ import static org.mockito.Mockito.when;
 
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -46,6 +47,10 @@ import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.master.RackManager;
+import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -57,8 +62,11 @@ import com.google.common.collect.Lists;
 public class TestBaseLoadBalancer extends BalancerTestBase {
 
   private static LoadBalancer loadBalancer;
-  private static final Log LOG = LogFactory.getLog(TestStochasticLoadBalancer.class);
+  private static final Log LOG = LogFactory.getLog(TestBaseLoadBalancer.class);
   private static final ServerName master = ServerName.valueOf("fake-master", 0, 1L);
+  private static RackManager rackManager;
+  private static final int NUM_SERVERS = 15;
+  private static ServerName[] servers = new ServerName[NUM_SERVERS];
 
   int[][] regionsAndServersMocks = new int[][] {
       // { num regions, num servers }
@@ -75,6 +83,21 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
     MasterServices st = Mockito.mock(MasterServices.class);
     Mockito.when(st.getServerName()).thenReturn(master);
     loadBalancer.setMasterServices(st);
+
+    // Set up the rack topologies (5 machines per rack)
+    rackManager = Mockito.mock(RackManager.class);
+    for (int i = 0; i < NUM_SERVERS; i++) {
+      servers[i] = ServerName.valueOf("foo"+i+":1234",-1);
+      if (i < 5) {
+        Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack1");
+      }
+      if (i >= 5 && i < 10) {
+        Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack2");
+      }
+      if (i >= 10) {
+        Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack3");
+      }
+    }
   }
 
   public static class MockBalancer extends BaseLoadBalancer {
@@ -214,6 +237,138 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
     assertRetainedAssignment(existing, listOfServerNames, assignment);
   }
 
+  @Test
+  public void testRegionAvailability() throws Exception {
+    // Create a cluster with a few servers, assign them to specific racks
+    // then assign some regions. The tests should check whether moving a
+    // replica from one node to a specific other node or rack lowers the
+    // availability of the region or not
+
+    List<HRegionInfo> list0 = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> list1 = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> list2 = new ArrayList<HRegionInfo>();
+    // create a region (region1)
+    HRegionInfo hri1 = new HRegionInfo(
+        TableName.valueOf("table"), "key1".getBytes(), "key2".getBytes(),
+        false, 100);
+    // create a replica of the region (replica_of_region1)
+    HRegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
+    // create a second region (region2)
+    HRegionInfo hri3 = new HRegionInfo(
+        TableName.valueOf("table"), "key2".getBytes(), "key3".getBytes(),
+        false, 101);
+    list0.add(hri1); //only region1
+    list1.add(hri2); //only replica_of_region1
+    list2.add(hri3); //only region2
+    Map<ServerName, List<HRegionInfo>> clusterState =
+        new LinkedHashMap<ServerName, List<HRegionInfo>>();
+    clusterState.put(servers[0], list0); //servers[0] hosts region1
+    clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1
+    clusterState.put(servers[2], list2); //servers[2] hosts region2
+    // create a cluster with the above clusterState. The way in which the
+    // cluster is created (constructor code) would make sure the indices of
+    // the servers are in the order in which it is inserted in the clusterState
+    // map (linkedhashmap is important). A similar thing applies to the region lists
+    Cluster cluster = new Cluster(master, clusterState, null, null, null, null, rackManager);
+    // check whether a move of region1 from servers[0] to servers[1] would lower
+    // the availability of region1
+    assertTrue(cluster.wouldLowerAvailability(hri1, servers[1]));
+    // check whether a move of region1 from servers[0] to servers[2] would lower
+    // the availability of region1
+    assertTrue(!cluster.wouldLowerAvailability(hri1, servers[2]));
+    // check whether a move of replica_of_region1 from servers[0] to servers[2] would lower
+    // the availability of replica_of_region1
+    assertTrue(!cluster.wouldLowerAvailability(hri2, servers[2]));
+    // check whether a move of region2 from servers[0] to servers[1] would lower
+    // the availability of region2
+    assertTrue(!cluster.wouldLowerAvailability(hri3, servers[1]));
+
+    // now lets have servers[1] host replica_of_region2
+    list1.add(RegionReplicaUtil.getRegionInfoForReplica(hri3, 1));
+    // create a new clusterState with the above change
+    cluster = new Cluster(master, clusterState, null, null, null, null, rackManager);
+    // now check whether a move of a replica from servers[0] to servers[1] would lower
+    // the availability of region2
+    assertTrue(cluster.wouldLowerAvailability(hri3, servers[1]));
+
+    // start over again
+    clusterState.clear();
+    clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1
+    clusterState.put(servers[5], list1); //servers[5], rack2 hosts replica_of_region1 and replica_of_region2
+    clusterState.put(servers[6], list2); //servers[6], rack2 hosts region2
+    clusterState.put(servers[10], new ArrayList<HRegionInfo>()); //servers[10], rack3 hosts no region
+    // create a cluster with the above clusterState
+    cluster = new Cluster(master, clusterState, null, null, null, null, rackManager);
+    // check whether a move of region1 from servers[0],rack1 to servers[6],rack2 would
+    // lower the availability
+
+    assertTrue(cluster.wouldLowerAvailability(hri1, servers[0]));
+
+    // now create a cluster without the rack manager
+    cluster = new Cluster(master, clusterState, null, null, null, null, null);
+    // now repeat check whether a move of region1 from servers[0] to servers[6] would
+    // lower the availability
+    assertTrue(!cluster.wouldLowerAvailability(hri1, servers[6]));
+  }
+
+  @Test
+  public void testRegionAvailabilityWithRegionMoves() throws Exception {
+    List<HRegionInfo> list0 = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> list1 = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> list2 = new ArrayList<HRegionInfo>();
+    // create a region (region1)
+    HRegionInfo hri1 = new HRegionInfo(
+        TableName.valueOf("table"), "key1".getBytes(), "key2".getBytes(),
+        false, 100);
+    // create a replica of the region (replica_of_region1)
+    HRegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
+    // create a second region (region2)
+    HRegionInfo hri3 = new HRegionInfo(
+        TableName.valueOf("table"), "key2".getBytes(), "key3".getBytes(),
+        false, 101);
+    list0.add(hri1); //only region1
+    list1.add(hri2); //only replica_of_region1
+    list2.add(hri3); //only region2
+    Map<ServerName, List<HRegionInfo>> clusterState =
+        new LinkedHashMap<ServerName, List<HRegionInfo>>();
+    clusterState.put(servers[0], list0); //servers[0] hosts region1
+    clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1
+    clusterState.put(servers[2], list2); //servers[2] hosts region2
+    // create a cluster with the above clusterState. The way in which the
+    // cluster is created (constructor code) would make sure the indices of
+    // the servers are in the order in which it is inserted in the clusterState
+    // map (linkedhashmap is important).
+    Cluster cluster = new Cluster(master, clusterState, null, null, null, null, rackManager);
+    // check whether moving region1 from servers[1] to servers[2] would lower availability
+    assertTrue(!cluster.wouldLowerAvailability(hri1, servers[2]));
+
+    // now move region1 from servers[0] to servers[2]
+    cluster.doAction(new MoveRegionAction(0, 0, 2));
+    // now repeat check whether moving region1 from servers[1] to servers[2]
+    // would lower availability
+    assertTrue(cluster.wouldLowerAvailability(hri1, servers[2]));
+
+    // start over again
+    clusterState.clear();
+    List<HRegionInfo> list3 = new ArrayList<HRegionInfo>();
+    HRegionInfo hri4 = RegionReplicaUtil.getRegionInfoForReplica(hri3, 1);
+    list3.add(hri4);
+    clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1
+    clusterState.put(servers[5], list1); //servers[5], rack2 hosts replica_of_region1
+    clusterState.put(servers[6], list2); //servers[6], rack2 hosts region2
+    clusterState.put(servers[12], list3); //servers[12], rack3 hosts replica_of_region2
+    // create a cluster with the above clusterState
+    cluster = new Cluster(master, clusterState, null, null, null, null, rackManager);
+    // check whether a move of replica_of_region2 from servers[12],rack3 to servers[0],rack1 would
+    // lower the availability
+    assertTrue(!cluster.wouldLowerAvailability(hri4, servers[0]));
+    // now move region2 from servers[6],rack2 to servers[0],rack1
+    cluster.doAction(new MoveRegionAction(2, 2, 0));
+    // now repeat check if replica_of_region2 from servers[12],rack3 to servers[0],rack1 would
+    // lower the availability
+    assertTrue(cluster.wouldLowerAvailability(hri3, servers[0]));
+  }
+
   private List<ServerName> getListOfServerNames(final List<ServerAndLoad> sals) {
     List<ServerName> list = new ArrayList<ServerName>();
     for (ServerAndLoad e : sals) {
@@ -289,7 +444,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
     assignRegions(regions, oldServers, clusterState);
 
     // should not throw exception:
-    BaseLoadBalancer.Cluster cluster = new Cluster(null, clusterState, null, null, null, null);
+    BaseLoadBalancer.Cluster cluster = new Cluster(null, clusterState, null, null, null, null, null);
     assertEquals(101 + 9, cluster.numRegions);
     assertEquals(10, cluster.numServers); // only 10 servers because they share the same host + port
   }
@@ -331,7 +486,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
     when(locationFinder.getTopBlockLocations(regions.get(43))).thenReturn(
       Lists.newArrayList(ServerName.valueOf("foo", 0, 0))); // this server does not exists in clusterStatus

-    BaseLoadBalancer.Cluster cluster = new Cluster(null, clusterState, null, locationFinder, null, null);
+    BaseLoadBalancer.Cluster cluster = new Cluster(null, clusterState, null, locationFinder, null, null, null);

     int r0 = ArrayUtils.indexOf(cluster.regions, regions.get(0)); // this is ok, it is just a test
     int r1 = ArrayUtils.indexOf(cluster.regions, regions.get(1));

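A condensed, standalone variant of the wouldLowerAvailability checks exercised above; the class and method names come from the patch, while the tiny three-server layout is made up for illustration:

    // inside a JUnit test in the same package as BaseLoadBalancer
    Map<ServerName, List<HRegionInfo>> state = new LinkedHashMap<ServerName, List<HRegionInfo>>();
    HRegionInfo primary = new HRegionInfo(TableName.valueOf("t"),
        "a".getBytes(), "b".getBytes(), false, 100);
    HRegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(primary, 1);
    ServerName sn0 = ServerName.valueOf("host0", 1234, 1L);
    ServerName sn1 = ServerName.valueOf("host1", 1234, 1L);
    ServerName sn2 = ServerName.valueOf("host2", 1234, 1L);
    state.put(sn0, new ArrayList<HRegionInfo>(Arrays.asList(primary)));
    state.put(sn1, new ArrayList<HRegionInfo>(Arrays.asList(replica)));
    state.put(sn2, new ArrayList<HRegionInfo>());
    // null RackManager: only host-level co-location is considered
    BaseLoadBalancer.Cluster cluster =
        new BaseLoadBalancer.Cluster(null, state, null, null, null, null, null);
    // moving the primary onto the replica's server would co-locate two replicas
    assertTrue(cluster.wouldLowerAvailability(primary, sn1));
    // moving it to an empty server does not hurt availability
    assertTrue(!cluster.wouldLowerAvailability(primary, sn2));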
http://git-wip-us.apache.org/repos/asf/hbase/blob/a98f5295/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
index e6c7319..6aee367 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
@@ -17,10 +17,19 @@
  */
 package org.apache.hadoop.hbase.master.balancer;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Queue;
 import java.util.TreeMap;
 
@@ -34,29 +43,30 @@ import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.master.RackManager;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.net.DNSToSwitchMapping;
+import org.apache.hadoop.net.NetworkTopology;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
 @Category(MediumTests.class)
 public class TestStochasticLoadBalancer extends BalancerTestBase {
   public static final String REGION_KEY = "testRegion";
   private static StochasticLoadBalancer loadBalancer;
   private static final Log LOG = LogFactory.getLog(TestStochasticLoadBalancer.class);
+  private static Configuration conf;
+  private static final ServerName master = ServerName.valueOf("fake-master", 0, 1L);
 
   @BeforeClass
   public static void beforeAllTests() throws Exception {
-    Configuration conf = HBaseConfiguration.create();
+    conf = HBaseConfiguration.create();
     conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 0.75f);
+    conf.setClass("hbase.util.ip.to.rack.determiner",
+        MyRackResolver.class, DNSToSwitchMapping.class);
     loadBalancer = new StochasticLoadBalancer();
     loadBalancer.setConf(conf);
   }
@@ -187,22 +197,29 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     StochasticLoadBalancer.CostFunction
         costFunction = new StochasticLoadBalancer.RegionCountSkewCostFunction(conf, 1, 1);
     for (int[] mockCluster : clusterStateMocks) {
-      double cost = costFunction.cost(mockCluster(mockCluster));
+      costFunction.init(mockCluster(mockCluster));
+      double cost = costFunction.cost();
       assertTrue(cost >= 0);
       assertTrue(cost <= 1.01);
     }
+    costFunction.init(mockCluster(new int[]{0, 0, 0, 0, 1}));
     assertEquals(1,
-        costFunction.cost(mockCluster(new int[]{0, 0, 0, 0, 1})), 0.01);
+        costFunction.cost(), 0.01);
+    costFunction.init(mockCluster(new int[]{0, 0, 0, 1, 1}));
     assertEquals(.75,
-        costFunction.cost(mockCluster(new int[]{0, 0, 0, 1, 1})), 0.01);
+        costFunction.cost(), 0.01);
+    costFunction.init(mockCluster(new int[]{0, 0, 1, 1, 1}));
     assertEquals(.5,
-        costFunction.cost(mockCluster(new int[]{0, 0, 1, 1, 1})), 0.01);
+        costFunction.cost(), 0.01);
+    costFunction.init(mockCluster(new int[]{0, 1, 1, 1, 1}));
     assertEquals(.25,
-        costFunction.cost(mockCluster(new int[]{0, 1, 1, 1, 1})), 0.01);
+        costFunction.cost(), 0.01);
+    costFunction.init(mockCluster(new int[]{1, 1, 1, 1, 1}));
     assertEquals(0,
-        costFunction.cost(mockCluster(new int[]{1, 1, 1, 1, 1})), 0.01);
+        costFunction.cost(), 0.01);
+    costFunction.init(mockCluster(new int[]{10, 10, 10, 10, 10}));
     assertEquals(0,
-        costFunction.cost(mockCluster(new int[]{10, 10, 10, 10, 10})), 0.01);
+        costFunction.cost(), 0.01);
   }
 
   @Test
@@ -212,7 +229,8 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
         costFunction = new StochasticLoadBalancer.TableSkewCostFunction(conf);
     for (int[] mockCluster : clusterStateMocks) {
       BaseLoadBalancer.Cluster cluster = mockCluster(mockCluster);
-      double cost = costFunction.cost(cluster);
+      costFunction.init(cluster);
+      double cost = costFunction.cost();
       assertTrue(cost >= 0);
       assertTrue(cost <= 1.01);
     }
@@ -250,10 +268,11 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     int numNodes = 3;
     int numRegions = 20;
     int numRegionsPerServer = 3; //all servers except one
+    int replication = 1;
     int numTables = 2;
 
     Map<ServerName, List<HRegionInfo>> serverMap =
-        createServerMap(numNodes, numRegions, numRegionsPerServer, numTables);
+        createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
     List<ServerAndLoad> list = convertToList(serverMap);
 
 
@@ -275,13 +294,103 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     assertNull(plans);
   }
 
+  @Test
+  public void testReplicaCost() {
+    Configuration conf = HBaseConfiguration.create();
+    StochasticLoadBalancer.CostFunction
+        costFunction = new StochasticLoadBalancer.RegionReplicaHostCostFunction(conf);
+    for (int[] mockCluster : clusterStateMocks) {
+      BaseLoadBalancer.Cluster cluster = mockCluster(mockCluster);
+      costFunction.init(cluster);
+      double cost = costFunction.cost();
+      assertTrue(cost >= 0);
+      assertTrue(cost <= 1.01);
+    }
+  }
+
+  @Test
+  public void testReplicaCostForReplicas() {
+    Configuration conf = HBaseConfiguration.create();
+    StochasticLoadBalancer.CostFunction
+        costFunction = new StochasticLoadBalancer.RegionReplicaHostCostFunction(conf);
+
+    int [] servers = new int[] {3,3,3,3,3};
+    TreeMap<ServerName, List<HRegionInfo>> clusterState = mockClusterServers(servers);
+
+    BaseLoadBalancer.Cluster cluster;
+
+    cluster = new BaseLoadBalancer.Cluster(master, clusterState, null, null, null, null, null);
+    costFunction.init(cluster);
+    double costWithoutReplicas = costFunction.cost();
+    assertEquals(0, costWithoutReplicas, 0);
+
+    // replicate the region from first server to the last server
+    HRegionInfo replica1 = RegionReplicaUtil.getRegionInfoForReplica(
+      clusterState.firstEntry().getValue().get(0),1);
+    clusterState.lastEntry().getValue().add(replica1);
+
+    cluster = new BaseLoadBalancer.Cluster(master, clusterState, null, null, null, null, null);
+    costFunction.init(cluster);
+    double costWith1ReplicaDifferentServer = costFunction.cost();
+
+    assertEquals(0, costWith1ReplicaDifferentServer, 0);
+
+    // add a third replica to the last server
+    HRegionInfo replica2 = RegionReplicaUtil.getRegionInfoForReplica(replica1, 2);
+    clusterState.lastEntry().getValue().add(replica2);
+
+    cluster = new BaseLoadBalancer.Cluster(master, clusterState, null, null, null, null, null);
+    costFunction.init(cluster);
+    double costWith1ReplicaSameServer = costFunction.cost();
+
+    assertTrue(costWith1ReplicaDifferentServer < costWith1ReplicaSameServer);
+
+    // test with replication = 4 for following:
+
+    HRegionInfo replica3;
+    Iterator<Entry<ServerName, List<HRegionInfo>>> it;
+    Entry<ServerName, List<HRegionInfo>> entry;
+
+    clusterState = mockClusterServers(servers);
+    it = clusterState.entrySet().iterator();
+    entry = it.next(); //first server
+    HRegionInfo hri = entry.getValue().get(0);
+    replica1 = RegionReplicaUtil.getRegionInfoForReplica(hri, 1);
+    replica2 = RegionReplicaUtil.getRegionInfoForReplica(hri, 2);
+    replica3 = RegionReplicaUtil.getRegionInfoForReplica(hri, 3);
+    entry.getValue().add(replica1);
+    entry.getValue().add(replica2);
+    it.next().getValue().add(replica3); //2nd server
+
+    cluster = new BaseLoadBalancer.Cluster(master, clusterState, null, null, null, null, null);
+    costFunction.init(cluster);
+    double costWith3ReplicasSameServer = costFunction.cost();
+
+    clusterState = mockClusterServers(servers);
+    hri = clusterState.firstEntry().getValue().get(0);
+    replica1 = RegionReplicaUtil.getRegionInfoForReplica(hri, 1);
+    replica2 = RegionReplicaUtil.getRegionInfoForReplica(hri, 2);
+    replica3 = RegionReplicaUtil.getRegionInfoForReplica(hri, 3);
+
+    clusterState.firstEntry().getValue().add(replica1);
+    clusterState.lastEntry().getValue().add(replica2);
+    clusterState.lastEntry().getValue().add(replica3);
+
+    cluster = new BaseLoadBalancer.Cluster(master, clusterState, null, null, null, null, null);
+    costFunction.init(cluster);
+    double costWith2ReplicasOnTwoServers = costFunction.cost();
+
+    assertTrue(costWith2ReplicasOnTwoServers < costWith3ReplicasSameServer);
+  }
+
   @Test (timeout = 60000)
   public void testSmallCluster() {
     int numNodes = 10;
     int numRegions = 1000;
     int numRegionsPerServer = 40; //all servers except one
+    int replication = 1;
     int numTables = 10;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, numTables, true);
+    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
   }
 
   @Test (timeout = 60000)
@@ -289,8 +398,9 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     int numNodes = 20;
     int numRegions = 2000;
     int numRegionsPerServer = 40; //all servers except one
+    int replication = 1;
     int numTables = 10;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, numTables, true);
+    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
   }
 
   @Test (timeout = 60000)
@@ -298,8 +408,10 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     int numNodes = 20;
     int numRegions = 2000;
     int numRegionsPerServer = 1; // all servers except one
+    int replication = 1;
     int numTables = 10;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, numTables, false /* max moves */);
+    /* fails because of max moves */
+    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, false, false);
   }
 
   @Test (timeout = 800000)
@@ -307,8 +419,9 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     int numNodes = 100;
     int numRegions = 10000;
     int numRegionsPerServer = 60; // all servers except one
+    int replication = 1;
     int numTables = 40;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, numTables, true);
+    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
   }
 
   @Test (timeout = 800000)
@@ -316,12 +429,15 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     int numNodes = 200;
     int numRegions = 100000;
     int numRegionsPerServer = 40; // all servers except one
+    int replication = 1;
     int numTables = 400;
     testWithCluster(numNodes,
         numRegions,
         numRegionsPerServer,
+        replication,
         numTables,
-        false /* num large num regions means may not always get to best balance with one run */);
+        false, /* num large num regions means may not always get to best balance with one run */
+        false);
   }
 
 
@@ -330,8 +446,9 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     int numNodes = 100;
     int numRegions = 2000;
     int numRegionsPerServer = 9; // all servers except one
+    int replication = 1;
     int numTables = 110;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, numTables, true);
+    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
     // TODO(eclark): Make sure that the tables are well distributed.
   }
 
@@ -341,20 +458,145 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
     int numRegions = 100000; //100 regions per RS
     int numRegionsPerServer = 80; //all servers except one
     int numTables = 100;
-    testWithCluster(numNodes, numRegions, numRegionsPerServer, numTables, true);
+    int replication = 1;
+    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
+  }
+
+  @Test (timeout = 60000)
+  public void testRegionReplicasOnSmallCluster() {
+    int numNodes = 10;
+    int numRegions = 1000;
+    int replication = 3; // 3 replicas per region
+    int numRegionsPerServer = 80; //all regions are mostly balanced
+    int numTables = 10;
+    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
+  }
+
+  @Test (timeout = 60000)
+  public void testRegionReplicasOnMidCluster() {
+    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
+    loadBalancer.setConf(conf);
+    int numNodes = 200;
+    int numRegions = 40 * 200;
+    int replication = 3; // 3 replicas per region
+    int numRegionsPerServer = 30; //all regions are mostly balanced
+    int numTables = 10;
+    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
+  }
+
+  @Test (timeout = 60000)
+  public void testRegionReplicasOnLargeCluster() {
+    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
+    loadBalancer.setConf(conf);
+    int numNodes = 1000;
+    int numRegions = 40 * numNodes; //40 regions per RS
+    int numRegionsPerServer = 30; //all servers except one
+    int numTables = 100;
+    int replication = 3;
+    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
+  }
+
+  @Test (timeout = 60000)
+  public void testRegionReplicasOnMidClusterHighReplication() {
+    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
+    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
+    loadBalancer.setConf(conf);
+    int numNodes = 100;
+    int numRegions = 6 * 100;
+    int replication = 100; // 100 replicas per region, one for each server
+    int numRegionsPerServer = 5;
+    int numTables = 10;
+    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
+  }
+
+  @Test (timeout = 60000)
+  public void testRegionReplicationOnMidClusterSameHosts() {
+    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
+    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
+    loadBalancer.setConf(conf);
+    int numHosts = 100;
+    int numRegions = 100 * 100;
+    int replication = 3; // 3 replicas per region
+    int numRegionsPerServer = 5;
+    int numTables = 10;
+    Map<ServerName, List<HRegionInfo>> serverMap =
+        createServerMap(numHosts, numRegions, numRegionsPerServer, replication, numTables);
+    int numNodesPerHost = 4;
+
+    // create a new map with 4 RS per host.
+    Map<ServerName, List<HRegionInfo>> newServerMap = new TreeMap<ServerName, List<HRegionInfo>>(serverMap);
+    for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverMap.entrySet()) {
+      for (int i=1; i < numNodesPerHost; i++) {
+        ServerName s1 = entry.getKey();
+        ServerName s2 = ServerName.valueOf(s1.getHostname(), s1.getPort() + i, 1); // create an RS for the same host
+        newServerMap.put(s2, new ArrayList<HRegionInfo>());
+      }
+    }
+
+    testWithCluster(newServerMap, null, true, true);
+  }
+
+  private static class ForTestRackManager extends RackManager {
+    int numRacks;
+    public ForTestRackManager(int numRacks) {
+      this.numRacks = numRacks;
+    }
+    @Override
+    public String getRack(ServerName server) {
+      return "rack_" + (server.hashCode() % numRacks);
+    }
+  }
+
+  @Test (timeout = 120000)
+  public void testRegionReplicationOnMidClusterWithRacks() {
+    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 4000000L);
+    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
+    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 60 * 1000); // 60 sec
+    loadBalancer.setConf(conf);
+    int numNodes = 50;
+    int numRegions = numNodes * 30;
+    int replication = 3; // 3 replicas per region
+    int numRegionsPerServer = 25;
+    int numTables = 10;
+    int numRacks = 4; // all replicas should be on a different rack
+    Map<ServerName, List<HRegionInfo>> serverMap =
+        createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
+    RackManager rm = new ForTestRackManager(numRacks);
+
+    testWithCluster(serverMap, rm, true, true);
+  }
+
+  @Test (timeout = 60000)
+  public void testRegionReplicationOnMidClusterReplicationGreaterThanNumNodes() {
+    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
+    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
+    loadBalancer.setConf(conf);
+    int numNodes = 80;
+    int numRegions = 6 * 100;
+    int replication = 100; // 100 replicas per region, more than numNodes
+    int numRegionsPerServer = 5;
+    int numTables = 10;
+    testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, false);
   }
 
   protected void testWithCluster(int numNodes,
-                                 int numRegions,
-                                 int numRegionsPerServer,
-                                 int numTables,
-                                 boolean assertFullyBalanced) {
+      int numRegions,
+      int numRegionsPerServer,
+      int replication,
+      int numTables,
+      boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) {
     Map<ServerName, List<HRegionInfo>> serverMap =
-        createServerMap(numNodes, numRegions, numRegionsPerServer, numTables);
+        createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
+    testWithCluster(serverMap, null, assertFullyBalanced, assertFullyBalancedForReplicas);
+  }
+
 
+  protected void testWithCluster(Map<ServerName, List<HRegionInfo>> serverMap,
+      RackManager rackManager, boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) {
     List<ServerAndLoad> list = convertToList(serverMap);
     LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));
 
+    loadBalancer.setRackManager(rackManager);
     // Run the balancer.
     List<RegionPlan> plans = loadBalancer.balanceCluster(serverMap);
     assertNotNull(plans);
@@ -369,12 +611,16 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
       assertClusterAsBalanced(balancedCluster);
       List<RegionPlan> secondPlans =  loadBalancer.balanceCluster(serverMap);
       assertNull(secondPlans);
+      if (assertFullyBalancedForReplicas) {
+        assertRegionReplicaPlacement(serverMap, rackManager);
+      }
     }
   }
 
   private Map<ServerName, List<HRegionInfo>> createServerMap(int numNodes,
                                                              int numRegions,
                                                              int numRegionsPerServer,
+                                                             int replication,
                                                              int numTables) {
     //construct a cluster of numNodes, having  a total of numRegions. Each RS will hold
     //numRegionsPerServer many regions except for the last one, which will host all the
@@ -384,6 +630,40 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
       cluster[i] = numRegionsPerServer;
     }
     cluster[cluster.length - 1] = numRegions - ((cluster.length - 1) * numRegionsPerServer);
-    return mockClusterServers(cluster, numTables);
+    Map<ServerName, List<HRegionInfo>> clusterState = mockClusterServers(cluster, numTables);
+    if (replication > 0) {
+      // replicate the regions to the same servers
+      for (List<HRegionInfo> regions : clusterState.values()) {
+        int length = regions.size();
+        for (int i = 0; i < length; i++) {
+          for (int r = 1; r < replication ; r++) {
+            regions.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(i), r));
+          }
+        }
+      }
+    }
+
+    return clusterState;
+  }
+
+  public static class MyRackResolver implements DNSToSwitchMapping {
+
+    public MyRackResolver(Configuration conf) {}
+
+    @Override
+    public List<String> resolve(List<String> names) {
+      List<String> racks = new ArrayList<String>(names.size());
+      for (int i = 0; i < names.size(); i++) {
+        racks.add(i, NetworkTopology.DEFAULT_RACK);
+      }
+      return racks;
+    }
+
+    @Override
+    public void reloadCachedMappings() {}
+
+    @Override
+    public void reloadCachedMappings(List<String> names) {
+    }
   }
 }

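The replica-aware stochastic balancer tests above differ mainly in how much work they allow the balancer per run; the knobs they touch are plain Configuration settings. A minimal setup sketch restricted to the keys that appear in the patch (the values here are illustrative, MyRackResolver is the stub defined above):

    Configuration conf = HBaseConfiguration.create();
    // allow every region to move in a single balancer run
    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
    // give the cost-minimization loop more steps and wall-clock time
    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 60 * 1000); // 60 sec
    // rack resolution used by RackManager
    conf.setClass("hbase.util.ip.to.rack.determiner",
        MyRackResolver.class, DNSToSwitchMapping.class);

    StochasticLoadBalancer balancer = new StochasticLoadBalancer();
    balancer.setConf(conf);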
http://git-wip-us.apache.org/repos/asf/hbase/blob/a98f5295/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
index cc2235f..86e6b89 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
@@ -105,7 +105,7 @@ public class TestRegionReplicas {
   private void openRegion(HRegionInfo hri) throws Exception {
     ZKAssign.createNodeOffline(HTU.getZooKeeperWatcher(), hri, getRS().getServerName());
     // first version is '0'
-    AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(getRS().getServerName(), hri, 0, null);
+    AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(getRS().getServerName(), hri, 0, null, null);
     AdminProtos.OpenRegionResponse responseOpen = getRS().getRSRpcServices().openRegion(null, orr);
     Assert.assertTrue(responseOpen.getOpeningStateCount() == 1);
     Assert.assertTrue(responseOpen.getOpeningState(0).
