This is an automated email from the ASF dual-hosted git repository. vjasani pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2.3 by this push: new 5ca0477 HBASE-25383: Ability to update and remove peer base config 5ca0477 is described below commit 5ca04777c4097d4617a16b0209455ab72e753cac Author: Sandeep Pal <sandeep....@salesforce.com> AuthorDate: Fri Dec 18 13:23:00 2020 +0530 HBASE-25383: Ability to update and remove peer base config Closes #2778 Signed-off-by: Bharath Vissapragada <bhara...@apache.org> Signed-off-by: Geoffrey Jacoby <gjac...@apache.org> Signed-off-by: Viraj Jasani <vjas...@apache.org> --- .../replication/ReplicationPeerConfigUtil.java | 34 ++++--- .../hbase/replication/ReplicationPeerConfig.java | 6 ++ .../replication/ReplicationPeerConfigBuilder.java | 9 ++ .../replication/TestZKReplicationPeerStorage.java | 67 +++++++++++-- .../master/replication/ReplicationPeerManager.java | 4 +- .../hbase/replication/TestMasterReplication.java | 107 +++++++++++++++------ 6 files changed, 174 insertions(+), 53 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java index 9be8409..0c015e0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java @@ -27,7 +27,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompoundConfiguration; @@ -39,12 +38,12 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerConfigBuilder; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.util.Bytes; -import 
org.apache.hbase.thirdparty.com.google.common.base.Splitter; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - +import org.apache.hbase.thirdparty.com.google.common.base.Splitter; +import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; @@ -245,7 +244,7 @@ public final class ReplicationPeerConfigUtil { /** * @param bytes Content of a peer znode. * @return ClusterKey parsed from the passed bytes. - * @throws DeserializationException + * @throws DeserializationException deserialization exception */ public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes) throws DeserializationException { @@ -383,7 +382,7 @@ public final class ReplicationPeerConfigUtil { } /** - * @param peerConfig + * @param peerConfig peer config of replication peer * @return Serialized protobuf of <code>peerConfig</code> with pb magic prefix prepended suitable * for use as content of a this.peersZNode; i.e. the content of PEER_ID znode under * /hbase/replication/peers/PEER_ID @@ -428,37 +427,42 @@ public final class ReplicationPeerConfigUtil { } /** - * Helper method to add base peer configs from Configuration to ReplicationPeerConfig - * if not present in latter. + * Helper method to add/remove base peer configs from Configuration to ReplicationPeerConfig * * This merges the user supplied peer configuration * {@link org.apache.hadoop.hbase.replication.ReplicationPeerConfig} with peer configs * provided as property hbase.replication.peer.base.configs in hbase configuration. - * Expected format for this hbase configuration is "k1=v1;k2=v2,v2_1". Original value - * of conf is retained if already present in ReplicationPeerConfig. 
+ * Expected format for this hbase configuration is "k1=v1;k2=v2,v2_1;k3=""". + * If value is empty, it will remove the existing key-value from peer config. * * @param conf Configuration * @return ReplicationPeerConfig containing updated configs. */ - public static ReplicationPeerConfig addBasePeerConfigsIfNotPresent(Configuration conf, + public static ReplicationPeerConfig updateReplicationBasePeerConfigs(Configuration conf, ReplicationPeerConfig receivedPeerConfig) { - String basePeerConfigs = conf.get(HBASE_REPLICATION_PEER_BASE_CONFIG, ""); ReplicationPeerConfigBuilder copiedPeerConfigBuilder = ReplicationPeerConfig. newBuilder(receivedPeerConfig); - Map<String,String> receivedPeerConfigMap = receivedPeerConfig.getConfiguration(); + Map<String, String> receivedPeerConfigMap = receivedPeerConfig.getConfiguration(); + String basePeerConfigs = conf.get(HBASE_REPLICATION_PEER_BASE_CONFIG, ""); if (basePeerConfigs.length() != 0) { Map<String, String> basePeerConfigMap = Splitter.on(';').trimResults().omitEmptyStrings() .withKeyValueSeparator("=").split(basePeerConfigs); - for (Map.Entry<String,String> entry : basePeerConfigMap.entrySet()) { + for (Map.Entry<String, String> entry : basePeerConfigMap.entrySet()) { String configName = entry.getKey(); String configValue = entry.getValue(); - // Only override if base config does not exist in existing peer configs - if (!receivedPeerConfigMap.containsKey(configName)) { + // If the config is provided with empty value, for eg. k1="", + // we remove it from peer config. Providing config with empty value + // is required so that it doesn't remove any other config unknowingly. 
+ if (Strings.isNullOrEmpty(configValue)) { + copiedPeerConfigBuilder.removeConfiguration(configName); + } else if (!receivedPeerConfigMap.getOrDefault(configName, "").equals(configValue)) { + // update the configuration if exact config and value doesn't exist + copiedPeerConfigBuilder.putConfiguration(configName, configValue); } } } + return copiedPeerConfigBuilder.build(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index 7c0f115..030ae3d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -278,6 +278,12 @@ public class ReplicationPeerConfig { } @Override + public ReplicationPeerConfigBuilder removeConfiguration(String key) { + this.configuration.remove(key); + return this; + } + + @Override public ReplicationPeerConfigBuilder putPeerData(byte[] key, byte[] value) { + this.peerData.put(key, value); return this; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java index 4c531c5..180239b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java @@ -53,6 +53,15 @@ public interface ReplicationPeerConfigBuilder { ReplicationPeerConfigBuilder putConfiguration(String key, String value); /** + * Removes a "raw" configuration property for this replication peer. For experts only. 
+ * @param key Configuration property key to be removed + * @return {@code this} + */ + @InterfaceAudience.Private + ReplicationPeerConfigBuilder removeConfiguration(String key); + + + /** * Adds all of the provided "raw" configuration entries to {@code this}. * @param configuration A collection of raw configuration entries * @return {@code this} diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java index d4ad292..77b71ad 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java @@ -25,7 +25,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; - import java.io.IOException; import java.util.HashMap; import java.util.Iterator; @@ -34,7 +33,6 @@ import java.util.Map; import java.util.Random; import java.util.Set; import java.util.stream.Stream; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseZKTestingUtility; @@ -42,6 +40,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -70,6 +69,11 @@ public class TestZKReplicationPeerStorage { UTIL.shutdownMiniZKCluster(); } + @After + public void cleanCustomConfigurations() { + UTIL.getConfiguration().unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + } + private 
Set<String> randNamespaces(Random rand) { return Stream.generate(() -> Long.toHexString(rand.nextLong())).limit(rand.nextInt(5)) .collect(toSet()); @@ -180,8 +184,7 @@ public class TestZKReplicationPeerStorage { } } - @Test - public void testBaseReplicationPeerConfig() { + @Test public void testBaseReplicationPeerConfig() throws ReplicationException{ String customPeerConfigKey = "hbase.xxx.custom_config"; String customPeerConfigValue = "test"; String customPeerConfigUpdatedValue = "testUpdated"; @@ -201,7 +204,7 @@ public class TestZKReplicationPeerStorage { concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondValue)); ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - addBasePeerConfigsIfNotPresent(conf,existingReplicationPeerConfig); + updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); // validates base configs are present in replicationPeerConfig assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). @@ -209,17 +212,63 @@ public class TestZKReplicationPeerStorage { assertEquals(customPeerConfigSecondValue, updatedReplicationPeerConfig.getConfiguration(). get(customPeerConfigSecondKey)); - // validates base configs does not override value if config already present + // validates base configs get updated values even if config already present + conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat(customPeerConfigUpdatedValue).concat(";"). concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondUpdatedValue)); ReplicationPeerConfig replicationPeerConfigAfterValueUpdate = ReplicationPeerConfigUtil. - addBasePeerConfigsIfNotPresent(conf,updatedReplicationPeerConfig); + updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); - assertEquals(customPeerConfigValue, replicationPeerConfigAfterValueUpdate. 
+ assertEquals(customPeerConfigUpdatedValue, replicationPeerConfigAfterValueUpdate. getConfiguration().get(customPeerConfigKey)); - assertEquals(customPeerConfigSecondValue, replicationPeerConfigAfterValueUpdate. + assertEquals(customPeerConfigSecondUpdatedValue, replicationPeerConfigAfterValueUpdate. getConfiguration().get(customPeerConfigSecondKey)); } + + @Test public void testBaseReplicationRemovePeerConfig() throws ReplicationException { + String customPeerConfigKey = "hbase.xxx.custom_config"; + String customPeerConfigValue = "test"; + ReplicationPeerConfig existingReplicationPeerConfig = getConfig(1); + + // custom config not present + assertEquals(existingReplicationPeerConfig.getConfiguration().get(customPeerConfigKey), null); + + Configuration conf = UTIL.getConfiguration(); + conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + customPeerConfigKey.concat("=").concat(customPeerConfigValue)); + + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. + updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + + // validates base configs are present in replicationPeerConfig + assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). + get(customPeerConfigKey)); + + conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + customPeerConfigKey.concat("=").concat("")); + + ReplicationPeerConfig replicationPeerConfigRemoved = ReplicationPeerConfigUtil. 
+ updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); + + assertNull(replicationPeerConfigRemoved.getConfiguration().get(customPeerConfigKey)); + } + + @Test public void testBaseReplicationRemovePeerConfigWithNoExistingConfig() + throws ReplicationException { + String customPeerConfigKey = "hbase.xxx.custom_config"; + ReplicationPeerConfig existingReplicationPeerConfig = getConfig(1); + + // custom config not present + assertEquals(existingReplicationPeerConfig.getConfiguration().get(customPeerConfigKey), null); + Configuration conf = UTIL.getConfiguration(); + conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + customPeerConfigKey.concat("=").concat("")); + + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. + updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + assertNull(updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index 90a61cf..32d9051 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -161,7 +161,7 @@ public class ReplicationPeerManager { // this should be a retry, just return return; } - peerConfig = ReplicationPeerConfigUtil.addBasePeerConfigsIfNotPresent(conf, peerConfig); + peerConfig = ReplicationPeerConfigUtil.updateReplicationBasePeerConfigs(conf, peerConfig); ReplicationPeerConfig copiedPeerConfig = ReplicationPeerConfig.newBuilder(peerConfig).build(); peerStorage.addPeer(peerId, copiedPeerConfig, enabled); peers.put(peerId, new ReplicationPeerDescription(peerId, enabled, copiedPeerConfig)); @@ -377,7 +377,7 @@ public class ReplicationPeerManager { for 
(String peerId : peerStorage.listPeerIds()) { ReplicationPeerConfig peerConfig = peerStorage.getPeerConfig(peerId); - peerConfig = ReplicationPeerConfigUtil.addBasePeerConfigsIfNotPresent(conf, peerConfig); + peerConfig = ReplicationPeerConfigUtil.updateReplicationBasePeerConfigs(conf, peerConfig); peerStorage.updatePeerConfig(peerId, peerConfig); boolean enabled = peerStorage.isPeerEnabled(peerId); peers.put(peerId, new ReplicationPeerDescription(peerId, enabled, peerConfig)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index 8f101ce..0cd6821 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; - import java.io.Closeable; import java.io.IOException; import java.util.Arrays; @@ -30,7 +29,6 @@ import java.util.List; import java.util.Optional; import java.util.Random; import java.util.concurrent.CountDownLatch; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -230,8 +228,8 @@ public class TestMasterReplication { // Load 100 rows for each hfile range in cluster '0' and validate whether its been replicated // to cluster '1'. 
byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; int numOfRows = 100; int[] expectedCounts = new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; @@ -242,10 +240,10 @@ public class TestMasterReplication { // Load 200 rows for each hfile range in cluster '1' and validate whether its been replicated // to cluster '0'. hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("gggg"), Bytes.toBytes("iiii") }, - new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; + new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; numOfRows = 200; int[] newExpectedCounts = new int[] { hfileRanges.length * numOfRows + expectedCounts[0], - hfileRanges.length * numOfRows + expectedCounts[1] }; + hfileRanges.length * numOfRows + expectedCounts[1] }; loadAndValidateHFileReplication("testHFileCyclicReplication_10", 1, new int[] { 0 }, row, famName, htables, hfileRanges, numOfRows, newExpectedCounts, true); @@ -344,12 +342,12 @@ public class TestMasterReplication { // Load 100 rows for each hfile range in cluster '0' and validate whether its been replicated // to cluster '1'. 
byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("mmmm"), Bytes.toBytes("oooo") }, - new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("rrr") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("mmmm"), Bytes.toBytes("oooo") }, + new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("rrr") }, }; int numOfRows = 100; int[] expectedCounts = - new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; + new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; loadAndValidateHFileReplication("testHFileCyclicReplication_0", 0, new int[] { 1 }, row, famName, htables, hfileRanges, numOfRows, expectedCounts, true); @@ -365,11 +363,11 @@ public class TestMasterReplication { // Load 200 rows for each hfile range in cluster '0' and validate whether its been replicated // to cluster '1' and '2'. Previous data should be replicated to cluster '2'. hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("ssss"), Bytes.toBytes("uuuu") }, - new byte[][] { Bytes.toBytes("vvv"), Bytes.toBytes("xxx") }, }; + new byte[][] { Bytes.toBytes("vvv"), Bytes.toBytes("xxx") }, }; numOfRows = 200; int[] newExpectedCounts = new int[] { hfileRanges.length * numOfRows + expectedCounts[0], - hfileRanges.length * numOfRows + expectedCounts[1], hfileRanges.length * numOfRows }; + hfileRanges.length * numOfRows + expectedCounts[1], hfileRanges.length * numOfRows }; loadAndValidateHFileReplication("testHFileCyclicReplication_1", 0, new int[] { 1, 2 }, row, famName, htables, hfileRanges, numOfRows, newExpectedCounts, true); @@ -400,8 +398,8 @@ public class TestMasterReplication { // Load 100 rows for each hfile range in cluster '0' for table CF 'f' byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, + new byte[][] { Bytes.toBytes("ddd"), 
Bytes.toBytes("fff") }, }; int numOfRows = 100; int[] expectedCounts = new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; @@ -411,11 +409,11 @@ public class TestMasterReplication { // Load 100 rows for each hfile range in cluster '0' for table CF 'f1' hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("gggg"), Bytes.toBytes("iiii") }, - new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; + new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; numOfRows = 100; int[] newExpectedCounts = - new int[] { hfileRanges.length * numOfRows + expectedCounts[0], expectedCounts[1] }; + new int[] { hfileRanges.length * numOfRows + expectedCounts[0], expectedCounts[1] }; loadAndValidateHFileReplication("load_f1", 0, new int[] { 1 }, row, famName1, htables, hfileRanges, numOfRows, newExpectedCounts, false); @@ -479,7 +477,7 @@ public class TestMasterReplication { * */ @Test - public void testBasePeerConfigsForPeerMutations() + public void testBasePeerConfigsForReplicationPeer() throws Exception { LOG.info("testBasePeerConfigsForPeerMutations"); String firstCustomPeerConfigKey = "hbase.xxx.custom_config"; @@ -532,18 +530,15 @@ public class TestMasterReplication { utilities[0].restartHBaseCluster(1); admin = utilities[0].getAdmin(); - // Both retains the value of base configuration 1 value as before restart. - // Peer 1 (Update value), Peer 2 (Base Value) - Assert.assertEquals(firstCustomPeerConfigUpdatedValue, admin.getReplicationPeerConfig("1"). + // Configurations should be updated after restart again + Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). getConfiguration().get(firstCustomPeerConfigKey)); Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("2"). getConfiguration().get(firstCustomPeerConfigKey)); - // Peer 1 gets new base config as part of restart. Assert.assertEquals(secondCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). 
getConfiguration().get(secondCustomPeerConfigKey)); - // Peer 2 retains the updated value as before restart. - Assert.assertEquals(secondCustomPeerConfigUpdatedValue, admin.getReplicationPeerConfig("2"). + Assert.assertEquals(secondCustomPeerConfigValue, admin.getReplicationPeerConfig("2"). getConfiguration().get(secondCustomPeerConfigKey)); } finally { shutDownMiniClusters(); @@ -551,6 +546,64 @@ public class TestMasterReplication { } } + @Test + public void testBasePeerConfigsRemovalForReplicationPeer() + throws Exception { + LOG.info("testBasePeerConfigsForPeerMutations"); + String firstCustomPeerConfigKey = "hbase.xxx.custom_config"; + String firstCustomPeerConfigValue = "test"; + + try { + baseConfiguration.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + firstCustomPeerConfigKey.concat("=").concat(firstCustomPeerConfigValue)); + startMiniClusters(2); + addPeer("1", 0, 1); + Admin admin = utilities[0].getAdmin(); + + // Validates base configs 1 is present for both peer. + Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). + getConfiguration().get(firstCustomPeerConfigKey)); + + utilities[0].getConfiguration().unset(ReplicationPeerConfigUtil. + HBASE_REPLICATION_PEER_BASE_CONFIG); + utilities[0].getConfiguration().set(ReplicationPeerConfigUtil. 
+ HBASE_REPLICATION_PEER_BASE_CONFIG, firstCustomPeerConfigKey.concat("=").concat("")); + + + utilities[0].shutdownMiniHBaseCluster(); + utilities[0].restartHBaseCluster(1); + admin = utilities[0].getAdmin(); + + // Configurations should be removed after restart again + Assert.assertNull(admin.getReplicationPeerConfig("1") + .getConfiguration().get(firstCustomPeerConfigKey)); + } finally { + shutDownMiniClusters(); + baseConfiguration.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + } + } + + @Test + public void testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer() + throws Exception { + LOG.info("testBasePeerConfigsForPeerMutations"); + String firstCustomPeerConfigKey = "hbase.xxx.custom_config"; + + try { + baseConfiguration.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + firstCustomPeerConfigKey.concat("=").concat("")); + startMiniClusters(2); + addPeer("1", 0, 1); + Admin admin = utilities[0].getAdmin(); + + Assert.assertNull("Config should not be there", admin.getReplicationPeerConfig("1"). + getConfiguration().get(firstCustomPeerConfigKey)); + } finally { + shutDownMiniClusters(); + baseConfiguration.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + } + } + @After public void tearDown() throws IOException { configurations = null; @@ -775,11 +828,11 @@ public class TestMasterReplication { // listen for successful log rolls final WALActionsListener listener = new WALActionsListener() { - @Override - public void postLogRoll(final Path oldPath, final Path newPath) throws IOException { - latch.countDown(); - } - }; + @Override + public void postLogRoll(final Path oldPath, final Path newPath) throws IOException { + latch.countDown(); + } + }; region.getWAL().registerWALActionsListener(listener); // request a roll