[hadoop-ozone] branch master updated: HDDS-3728. Bucket space: check quotaUsageInBytes when write key and allocate block. (#1458)

2020-10-09 Thread sammichen
This is an automated email from the ASF dual-hosted git repository.

sammichen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
 new d08a4c1  HDDS-3728. Bucket space: check quotaUsageInBytes when write key and allocate block. (#1458)
d08a4c1 is described below

commit d08a4c1d973f7752875436f497c4a007bea9f250
Author: micah zhao 
AuthorDate: Fri Oct 9 19:42:53 2020 +0800

HDDS-3728. Bucket space: check quotaUsageInBytes when write key and allocate block. (#1458)
---
 .../client/rpc/TestOzoneRpcClientAbstract.java | 67 --
 .../ozone/om/request/file/OMFileCreateRequest.java |  3 +-
 .../om/request/key/OMAllocateBlockRequest.java |  3 +-
 .../ozone/om/request/key/OMKeyCreateRequest.java   |  3 +-
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  | 21 +++
 5 files changed, 88 insertions(+), 9 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index c9551fd..b7b75a4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -815,6 +815,7 @@ public abstract class TestOzoneRpcClientAbstract {
   }
 
   @Test
+  @SuppressWarnings("methodlength")
   public void testCheckUsedBytesQuota() throws IOException {
 String volumeName = UUID.randomUUID().toString();
 String bucketName = UUID.randomUUID().toString();
@@ -828,13 +829,15 @@ public abstract class TestOzoneRpcClientAbstract {
 
 store.createVolume(volumeName);
 volume = store.getVolume(volumeName);
+
+// Test volume quota.
 // Set quota In Bytes for a smaller value
 store.getVolume(volumeName).setQuota(
 OzoneQuota.parseQuota("1 Bytes", 100));
 volume.createBucket(bucketName);
 OzoneBucket bucket = volume.getBucket(bucketName);
 
-// Test write key.
+// Test volume quota: write key.
 // The remaining quota does not satisfy a block size, so the write fails.
 try {
   writeKey(bucket, UUID.randomUUID().toString(), ONE, value, valueLength);
@@ -845,7 +848,7 @@ public abstract class TestOzoneRpcClientAbstract {
 // Write failed, volume usedBytes should be 0
 Assert.assertEquals(0L, store.getVolume(volumeName).getUsedBytes());
 
-// Test write file.
+// Test volume quota: write file.
 // The remaining quota does not satisfy a block size, so the write fails.
 try {
   writeFile(bucket, UUID.randomUUID().toString(), ONE, value, 0);
@@ -856,7 +859,7 @@ public abstract class TestOzoneRpcClientAbstract {
 // Write failed, volume usedBytes should be 0
 Assert.assertEquals(0L, store.getVolume(volumeName).getUsedBytes());
 
-// Write a key(with two blocks), test allocateBlock fails.
+// Test volume quota: write a key (with two blocks), test allocateBlock fails.
 store.getVolume(volumeName).setQuota(
 OzoneQuota.parseQuota(blockSize + "Bytes", 100));
 try {
@@ -873,8 +876,8 @@ public abstract class TestOzoneRpcClientAbstract {
 // AllocateBlock failed, volume usedBytes should be 1 * blockSize.
 Assert.assertEquals(blockSize, store.getVolume(volumeName).getUsedBytes());
 
-// Write large key(with five blocks), the first four blocks will succeed,
-// while the later block will fail.
+// Test volume quota: write a large key (with five blocks); the first four
+// blocks will succeed, while the last block will fail.
 store.getVolume(volumeName).setQuota(
 OzoneQuota.parseQuota(5 * blockSize + "Bytes", 100));
 try {
@@ -892,7 +895,59 @@ public abstract class TestOzoneRpcClientAbstract {
 Assert.assertEquals(5 * blockSize,
 store.getVolume(volumeName).getUsedBytes());
 
-Assert.assertEquals(4, countException);
+// Test bucket quota.
+// Set quota In Bytes for a smaller value
+store.getVolume(volumeName).setQuota(
+OzoneQuota.parseQuota(Long.MAX_VALUE + " Bytes", 100));
+bucketName = UUID.randomUUID().toString();
+volume.createBucket(bucketName);
+bucket = volume.getBucket(bucketName);
+bucket.setQuota(OzoneQuota.parseQuota("1 Bytes", 100));
+
+// Test bucket quota: write key.
+// The remaining quota does not satisfy a block size, so the write fails.
+try {
+  writeKey(bucket, UUID.randomUUID().toString(), ONE, value, valueLength);
+} catch (IOException ex) {
+  countException++;
+  GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex);
+}
+// Write failed, bucket usedBytes should be 0
+Assert.assertEquals(0L,
+store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
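
For illustration, a minimal sketch of the space check these tests exercise. The diffstat shows the shared logic landing in OMKeyRequest.java, but that hunk is not included in this message; the helper below uses illustrative names and is an assumption, not the actual patch.

    import java.io.IOException;

    /** Minimal sketch of a volume/bucket space check; names are illustrative. */
    final class QuotaCheckSketch {
      /** Throws if allocating allocateSize more bytes would exceed the quota. */
      static void checkQuotaBytes(long quotaInBytes, long usedBytes,
          long allocateSize) throws IOException {
        // An unset quota (commonly -1) is treated as "no limit" here.
        if (quotaInBytes >= 0 && quotaInBytes - usedBytes < allocateSize) {
          throw new IOException("QUOTA_EXCEEDED: quota=" + quotaInBytes
              + " Bytes, used=" + usedBytes + " Bytes, requested="
              + allocateSize + " Bytes");
        }
      }
    }

Because the check runs per allocation, a multi-block key can consume several blocks successfully and then fail on a later allocateBlock call, which is what the multi-block tests above assert.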

[hadoop-ozone] branch master updated: HDDS-3814. Drop a column family through debug cli tool (#1083)

2020-10-09 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
 new 7704cb5  HDDS-3814. Drop a column family through debug cli tool (#1083)
7704cb5 is described below

commit 7704cb5f6920d6bbf6b446c28a2b3fe4408e0568
Author: maobaolong 
AuthorDate: Fri Oct 9 20:39:44 2020 +0800

HDDS-3814. Drop a column family through debug cli tool (#1083)
---
 .../org/apache/hadoop/ozone/debug/DBScanner.java   | 18 ++---
 .../org/apache/hadoop/ozone/debug/DropTable.java   | 81 ++
 .../apache/hadoop/ozone/debug/RocksDBUtils.java| 49 +
 3 files changed, 135 insertions(+), 13 deletions(-)

diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
index b1139df..1ceab42 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
@@ -37,7 +37,6 @@ import com.google.gson.GsonBuilder;
 import org.kohsuke.MetaInfServices;
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyHandle;
-import org.rocksdb.Options;
 import org.rocksdb.RocksDB;
 import org.rocksdb.RocksIterator;
 import picocli.CommandLine;
@@ -150,19 +149,12 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
 
   @Override
   public Void call() throws Exception {
-List<ColumnFamilyDescriptor> cfs = new ArrayList<>();
+List<ColumnFamilyDescriptor> cfs =
+RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath());
+
 final List<ColumnFamilyHandle> columnFamilyHandleList =
-new ArrayList<>();
-List<byte[]> cfList = null;
-cfList = RocksDB.listColumnFamilies(new Options(),
-parent.getDbPath());
-if (cfList != null) {
-  for (byte[] b : cfList) {
-cfs.add(new ColumnFamilyDescriptor(b));
-  }
-}
-RocksDB rocksDB = null;
-rocksDB = RocksDB.openReadOnly(parent.getDbPath(),
+new ArrayList<>();
+RocksDB rocksDB = RocksDB.openReadOnly(parent.getDbPath(),
 cfs, columnFamilyHandleList);
 this.printAppropriateTable(columnFamilyHandleList,
rocksDB, parent.getDbPath());
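
RocksDBUtils.java is added by this commit but its hunk is not shown here. Judging from the DBScanner code it replaces above, a plausible sketch of the extracted helper (an assumption, not the real file):

    import java.util.ArrayList;
    import java.util.List;
    import org.rocksdb.ColumnFamilyDescriptor;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    final class RocksDBUtilsSketch {
      /** Builds a descriptor for every column family of the db at dbPath. */
      static List<ColumnFamilyDescriptor> getColumnFamilyDescriptors(
          String dbPath) throws RocksDBException {
        List<ColumnFamilyDescriptor> cfs = new ArrayList<>();
        // listColumnFamilies reads the family names without opening the db.
        for (byte[] name : RocksDB.listColumnFamilies(new Options(), dbPath)) {
          cfs.add(new ColumnFamilyDescriptor(name));
        }
        return cfs;
      }
    }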
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DropTable.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DropTable.java
new file mode 100644
index 0000000..161f1b2
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DropTable.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug;
+
+import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.rocksdb.ColumnFamilyDescriptor;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDB;
+import picocli.CommandLine;
+
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+/**
+ * Drop a column family/table in the db.
+ */
+@CommandLine.Command(
+name = "drop_column_family",
+description = "drop column family in db."
+)
+public class DropTable implements Callable<Void>, SubcommandWithParent {
+
+  @CommandLine.Option(names = {"--column_family"},
+  description = "Table name")
+  private String tableName;
+
+  @CommandLine.ParentCommand
+  private RDBParser parent;
+
+  @Override
+  public Void call() throws Exception {
+List<ColumnFamilyDescriptor> cfs =
+RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath());
+final List<ColumnFamilyHandle> columnFamilyHandleList =
+new ArrayList<>();
+try (RocksDB rocksDB = RocksDB.open(
+parent.getDbPath(), cfs, columnFamilyHandleList)) {
+  byte[] nameBytes = tableName.getBytes(StandardCharsets.UTF_8);
+  ColumnFamilyHandle toBeDeletedCf = null;
+  for (ColumnFamilyHandle cf : columnFamilyHandleList) {
+if (Arrays.equals(cf.getName(), nameBytes)) {
+  toBeDeletedCf = cf;
+  break;
+}
+  }
+  if (toBeDeletedCf 
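
The message is cut off mid-statement above. Based on the RocksDB Java API, a hedged sketch of how the remaining branch plausibly completes (the error text is invented):

    import org.rocksdb.ColumnFamilyHandle;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    final class DropStepSketch {
      static void drop(RocksDB rocksDB, ColumnFamilyHandle toBeDeletedCf,
          String tableName) throws RocksDBException {
        if (toBeDeletedCf == null) {
          System.err.println(tableName + " is not a column family in the db.");
          return;
        }
        // dropColumnFamily removes the column family and all of its data.
        rocksDB.dropColumnFamily(toBeDeletedCf);
      }
    }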

[hadoop-ozone] branch master updated: HDDS-4311. Type-safe config design doc points to OM HA (#1477)

2020-10-09 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
 new a1d53b0  HDDS-4311. Type-safe config design doc points to OM HA (#1477)
a1d53b0 is described below

commit a1d53b0781f5a9c89b665210e6853ed551892e47
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Fri Oct 9 14:42:45 2020 +0200

HDDS-4311. Type-safe config design doc points to OM HA (#1477)
---
 hadoop-hdds/docs/content/design/typesafeconfig.md | 10 +++---
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdds/docs/content/design/typesafeconfig.md b/hadoop-hdds/docs/content/design/typesafeconfig.md
index 77a3b4d..dfe5ef0 100644
--- a/hadoop-hdds/docs/content/design/typesafeconfig.md
+++ b/hadoop-hdds/docs/content/design/typesafeconfig.md
@@ -2,7 +2,7 @@
 title: Type-safe configuration API
summary: Inject configuration values based on annotations instead of using constants and Hadoop API
 date: 2019-04-25
-jira: HDDS-505
+jira: HDDS-1466
 status: implemented
 author: Anu Engineer, Marton Elek
 ---
@@ -22,12 +22,8 @@ author: Anu Engineer, Marton Elek
 
 # Abstract
 
- HA for Ozone Manager with the help of Ratis. High performance operation with caching and double-buffer.
+ Generate configuration from annotated plain Java objects to make configuration more structured and type safe.
  
 # Link
 
- * https://issues.apache.org/jira/secure/attachment/12940314/OzoneManager%20HA.pdf
-
- * https://issues.apache.org/jira/secure/attachment/12990063/OM%20HA%20Cache%20Design.pdf
-
- * https://issues.apache.org/jira/secure/attachment/12973260/Handling%20Write%20Requests%20with%20OM%20HA.pdf
\ No newline at end of file
+ * https://issues.apache.org/jira/secure/attachment/12966991/typesafe.pdf
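
For context, the design this doc describes turns annotated plain Java objects into configuration. A schematic example in the spirit of the hadoop-hdds config API; treat the annotation attribute details as illustrative rather than authoritative:

    import org.apache.hadoop.hdds.conf.Config;
    import org.apache.hadoop.hdds.conf.ConfigGroup;
    import org.apache.hadoop.hdds.conf.ConfigTag;
    import org.apache.hadoop.hdds.conf.ConfigType;

    /** Fields are injected from ozone.scm.client.* keys at startup. */
    @ConfigGroup(prefix = "ozone.scm.client")
    public class ScmClientConfigSketch {

      @Config(key = "max.retry",
          type = ConfigType.INT,
          defaultValue = "3",
          tags = {ConfigTag.CLIENT},
          description = "Max retry count for SCM client requests.")
      private int maxRetry;

      public int getMaxRetry() {
        return maxRetry;
      }
    }

A typed instance is then obtained with something like conf.getObject(ScmClientConfigSketch.class) instead of reading string keys through constants.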


-
To unsubscribe, e-mail: ozone-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: ozone-commits-h...@hadoop.apache.org



[hadoop-ozone] branch master updated (a1d53b0 -> 5c5d8cb)

2020-10-09 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.


from a1d53b0  HDDS-4311. Type-safe config design doc points to OM HA (#1477)
 add 5c5d8cb  HDDS-4312. findbugs check succeeds despite compile error (#1476)

No new revisions were added by this update.

Summary of changes:
 hadoop-ozone/dev-support/checks/findbugs.sh  | 7 +--
 .../interface-storage}/dev-support/findbugsExcludeFile.xml   | 2 +-
 hadoop-ozone/interface-storage/pom.xml   | 9 -
 3 files changed, 14 insertions(+), 4 deletions(-)
 copy {hadoop-hdds/interface-server => hadoop-ozone/interface-storage}/dev-support/findbugsExcludeFile.xml (93%)


-
To unsubscribe, e-mail: ozone-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: ozone-commits-h...@hadoop.apache.org



[hadoop-ozone] branch master updated (5c5d8cb -> 35cc6b0)

2020-10-09 Thread xyao
This is an automated email from the ASF dual-hosted git repository.

xyao pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.


from 5c5d8cb  HDDS-4312. findbugs check succeeds despite compile error (#1476)
 add 35cc6b0  HDDS-4285. Read is slow due to frequent calls to UGI.getCurrentUser() and getTokens() (#1454)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/hdds/scm/storage/BlockInputStream.java  |   8 +-
 .../hadoop/hdds/scm/storage/BlockOutputStream.java |  15 ++-
 .../hadoop/hdds/scm/storage/ChunkInputStream.java  |  10 +-
 .../hdds/scm/storage/DummyChunkInputStream.java|   2 +-
 .../storage/TestBlockOutputStreamCorrectness.java  |   2 +-
 .../hdds/scm/storage/ContainerProtocolCalls.java   | 139 -
 .../ozone/client/io/BlockOutputStreamEntry.java|   8 +-
 .../client/io/BlockOutputStreamEntryPool.java  |   5 +-
 .../hadoop/ozone/om/helpers/OmKeyLocationInfo.java |   6 +-
 .../hadoop/ozone/scm/TestContainerSmallFile.java   |  20 +--
 .../scm/TestGetCommittedBlockLengthAndPutKey.java  |   4 +-
 .../hadoop/ozone/scm/TestXceiverClientGrpc.java|   6 +-
 .../apache/hadoop/ozone/debug/ChunkKeyHandler.java |  11 +-
 13 files changed, 99 insertions(+), 137 deletions(-)
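
The pattern behind this change: instead of calling UserGroupInformation.getCurrentUser() and re-reading tokens on every block/chunk call, resolve them once and reuse. A minimal illustrative sketch, not the actual patch:

    import java.io.IOException;
    import org.apache.hadoop.security.UserGroupInformation;

    class CachedCallerSketch {
      // Hot-path reads previously re-resolved the caller on every call;
      // hoisting the lookup to construction time avoids that repeated cost.
      private final UserGroupInformation ugi;

      CachedCallerSketch() throws IOException {
        this.ugi = UserGroupInformation.getCurrentUser();
      }

      UserGroupInformation caller() {
        return ugi;
      }
    }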


-
To unsubscribe, e-mail: ozone-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: ozone-commits-h...@hadoop.apache.org



[hadoop-ozone] branch master updated: HDDS-4262. Use ClientID and CallID from Rpc Client to detect retry requests (#1436)

2020-10-09 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
 new c956ce6  HDDS-4262. Use ClientID and CallID from Rpc Client to detect retry requests (#1436)
c956ce6 is described below

commit c956ce6b7537a0286c01b15d496a7ffeba90
Author: Bharat Viswanadham 
AuthorDate: Fri Oct 9 10:35:50 2020 -0700

HDDS-4262. Use ClientID and CallID from Rpc Client to detect retry requests (#1436)
---
 .../ozone/om/TestOzoneManagerHAMetadataOnly.java   | 76 ++
 .../ozone/om/ratis/OzoneManagerRatisServer.java| 11 +++-
 .../om/request/volume/OMVolumeCreateRequest.java   |  6 ++
 3 files changed, 91 insertions(+), 2 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAMetadataOnly.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAMetadataOnly.java
index 754339e..fbe1762 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAMetadataOnly.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAMetadataOnly.java
@@ -30,9 +30,21 @@ import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
 import org.apache.hadoop.ozone.om.ha.OMProxyInfo;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
 import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
+import org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Logger;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientReply;
+import org.apache.ratis.protocol.RaftClientRequest;
+import org.apache.ratis.server.RaftServer;
 import org.junit.Assert;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -48,6 +60,7 @@ import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.Iterator;
+import java.util.UUID;
 
import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl.NODE_FAILURE_TIMEOUT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_DEFAULT;
@@ -348,6 +361,69 @@ public class TestOzoneManagerHAMetadataOnly extends TestOzoneManagerHA {
 Assert.assertTrue((long) flushCount >= 0);
   }
 
+  @Test
+  public void testOMRetryCache() throws Exception {
+ObjectStore objectStore = getObjectStore();
+objectStore.createVolume(UUID.randomUUID().toString());
+
+
+OMFailoverProxyProvider omFailoverProxyProvider = OmFailoverProxyUtil
+.getFailoverProxyProvider(objectStore.getClientProxy());
+
+String currentLeaderNodeId = omFailoverProxyProvider
+.getCurrentProxyOMNodeId();
+
+OzoneManagerRatisServer ozoneManagerRatisServer =
+getCluster().getOzoneManager(currentLeaderNodeId).getOmRatisServer();
+
+RaftServer raftServer = ozoneManagerRatisServer.getServer();
+
+ClientId clientId = ClientId.randomId();
+long callId = 2000L;
+String userName = UserGroupInformation.getCurrentUser().getUserName();
+String volumeName = UUID.randomUUID().toString();
+
+
+GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
+.captureLogs(OMVolumeCreateRequest.getLogger());
+OMRequest omRequest =
+OMRequest.newBuilder().setCreateVolumeRequest(
+CreateVolumeRequest.newBuilder().setVolumeInfo(
+VolumeInfo.newBuilder().setOwnerName(userName)
+.setAdminName(userName).setVolume(volumeName).build())
+.build()).setClientId(UUID.randomUUID().toString())
+.setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume).build();
+
+RaftClientReply raftClientReply =
+raftServer.submitClientRequest(new RaftClientRequest(clientId,
+ raftServer.getId(), ozoneManagerRatisServer.getRaftGroup()
+ .getGroupId(), callId,
+Message.valueOf(OMRatisHelper.convertRequestToByteString(omRequest)),
+RaftClientRequest.writeRequestType(), null));
+
+Assert.assertTrue(raftClientReply.isSuccess());
+
+Assert.assertTrue(logCapturer.getOutput().contains("created volume:"
++ volumeName));
+
+logCapturer.clearOutput();
+
+raft
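
The message is cut off above. Given the setup shown (a fixed ClientId and callId plus logCapturer.clearOutput()), the test plausibly resubmits the identical request and asserts it is served from the Ratis retry cache rather than re-applied; a hedged sketch of that continuation:

    RaftClientReply retriedReply =
        raftServer.submitClientRequest(new RaftClientRequest(clientId,
            raftServer.getId(), ozoneManagerRatisServer.getRaftGroup()
                .getGroupId(), callId,
        Message.valueOf(OMRatisHelper.convertRequestToByteString(omRequest)),
        RaftClientRequest.writeRequestType(), null));

    // The retried call still succeeds...
    Assert.assertTrue(retriedReply.isSuccess());
    // ...but the volume must not be created a second time, so the log line
    // captured earlier should not reappear after clearOutput().
    Assert.assertFalse(logCapturer.getOutput().contains("created volume:"
        + volumeName));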

[hadoop-ozone] branch HDDS-2823 updated: HDDS-4192: enable SCM Raft Group based on config ozone.scm.names (#1428)

2020-10-09 Thread licheng
This is an automated email from the ASF dual-hosted git repository.

licheng pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/HDDS-2823 by this push:
 new 7ddaa07  HDDS-4192: enable SCM Raft Group based on config ozone.scm.names (#1428)
7ddaa07 is described below

commit 7ddaa07d7de696c71113670c2f092cbc14f06658
Author: GlenGeng 
AuthorDate: Sat Oct 10 11:28:06 2020 +0800

HDDS-4192: enable SCM Raft Group based on config ozone.scm.names (#1428)

* HDDS-4192: enable SCM Raft Group based on config ozone.scm.names

* HDDS-4192: fix comments
---
 .../hadoop/hdds/scm/ha/SCMRatisServerImpl.java | 116 +++--
 1 file changed, 106 insertions(+), 10 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
index 33ae109..8611b1f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
@@ -18,17 +18,22 @@
 package org.apache.hadoop.hdds.scm.ha;
 
 import java.io.IOException;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
 
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.ratis.conf.RaftProperties;
 import org.apache.ratis.protocol.ClientId;
 import org.apache.ratis.protocol.RaftClientReply;
@@ -38,11 +43,15 @@ import org.apache.ratis.protocol.RaftGroupId;
 import org.apache.ratis.protocol.RaftPeer;
 import org.apache.ratis.protocol.RaftPeerId;
 import org.apache.ratis.server.RaftServer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * TODO.
  */
 public class SCMRatisServerImpl implements SCMRatisServer {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(SCMRatisServerImpl.class);
 
   private final InetSocketAddress address;
   private final RaftServer server;
@@ -53,24 +62,20 @@ public class SCMRatisServerImpl implements SCMRatisServer {
   private final ClientId clientId = ClientId.randomId();
   private final AtomicLong callId = new AtomicLong();
 
-
   // TODO: Refactor and remove ConfigurationSource and use only
   //  SCMHAConfiguration.
   SCMRatisServerImpl(final SCMHAConfiguration haConf,
  final ConfigurationSource conf)
   throws IOException {
-final String scmServiceId = "SCM-HA-Service";
-final String scmNodeId = "localhost";
-this.raftPeerId = RaftPeerId.getRaftPeerId(scmNodeId);
 this.address = haConf.getRatisBindAddress();
-final RaftPeer localRaftPeer = new RaftPeer(raftPeerId, address);
-final List<RaftPeer> raftPeers = new ArrayList<>();
-raftPeers.add(localRaftPeer);
+
+SCMHAGroupBuilder scmHAGroupBuilder = new SCMHAGroupBuilder(haConf, conf);
+this.raftPeerId = scmHAGroupBuilder.getPeerId();
+this.raftGroupId = scmHAGroupBuilder.getRaftGroupId();
+this.raftGroup = scmHAGroupBuilder.getRaftGroup();
+
 final RaftProperties serverProperties = RatisUtil
 .newRaftProperties(haConf, conf);
-this.raftGroupId = RaftGroupId.valueOf(
-UUID.nameUUIDFromBytes(scmServiceId.getBytes(StandardCharsets.UTF_8)));
-this.raftGroup = RaftGroup.valueOf(raftGroupId, raftPeers);
 this.scmStateMachine = new SCMStateMachine();
 this.server = RaftServer.newBuilder()
 .setServerId(raftPeerId)
@@ -125,4 +130,95 @@ public class SCMRatisServerImpl implements SCMRatisServer {
  public List<RaftPeer> getRaftPeers() {
 return Collections.singletonList(new RaftPeer(raftPeerId));
   }
+
+
+  /**
+   * If the SCM group starts from {@link ScmConfigKeys#OZONE_SCM_NAMES},
+   * its raft peers should be located on different nodes, and use the same
+   * port to communicate with each other.
+   *
+   * Each of the raft peer figures out its {@link RaftPeerId} by computing
+   * its position in {@link ScmConfigKeys#OZONE_SCM_NAMES}.
+   *
+   * Assume {@link ScmConfigKeys#OZONE_SCM_NAMES} is "ip0,ip1,ip2",
+   * scm with ip0 identifies its {@link RaftPeerId} as scm0,
+   * scm with ip1 identifies its {@link RaftPeerId} as scm1,
+   * scm with ip2 identifies its {@link RaftPeerId} as scm2.
+   *
+   * After startup, they will form a {@link RaftGroup} with groupID
+   * "SCM-HA-Service", and communicate with each other via
+   * ozone.scm.ha.ratis.bind.po
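
The peer-id rule described in the javadoc above can be summarized in a short sketch (illustrative only; the helper and its names are not from the patch):

    import java.util.Arrays;
    import java.util.List;

    final class PeerIdRuleSketch {
      /** ozone.scm.names = "ip0,ip1,ip2" maps the local address to scm<index>. */
      static String peerIdFor(String scmNames, String localAddress) {
        List<String> names = Arrays.asList(scmNames.split(","));
        int index = names.indexOf(localAddress);
        if (index < 0) {
          throw new IllegalArgumentException(
              localAddress + " is not listed in ozone.scm.names: " + scmNames);
        }
        return "scm" + index;
      }
    }

    // Example: peerIdFor("ip0,ip1,ip2", "ip1") evaluates to "scm1".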