This is an automated email from the ASF dual-hosted git repository.

captainzmc pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new ed029c976a HDDS-8702. Extract handler for reconfigurable properties 
(#4794)
ed029c976a is described below

commit ed029c976a7e88d78bb45cc4d66641242a24b672
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Wed Jun 7 08:14:29 2023 +0200

    HDDS-8702. Extract handler for reconfigurable properties (#4794)
    
    * HDDS-8702. Extract handler for reconfigurable properties
---
 .../ozone/HddsDatanodeClientProtocolServer.java    |  83 ++------------
 .../apache/hadoop/ozone/HddsDatanodeService.java   |  29 +++--
 .../hadoop/hdds/conf/ReconfigurationHandler.java   | 106 ++++++++++++++++++
 .../org/apache/hadoop/hdds/conf/package-info.java  |   9 +-
 .../hadoop/hdds/server/ServiceRuntimeInfoImpl.java |  25 +----
 .../apache/hadoop/hdds/utils/HddsServerUtil.java   |   8 ++
 .../hdds/conf/TestReconfigurationHandler.java      |  92 ++++++++++++++++
 .../org/apache/hadoop/hdds/conf/package-info.java  |   9 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |  42 ++------
 .../hdds/scm/server/StorageContainerManager.java   |  51 ++++-----
 .../org/apache/hadoop/hdds/scm/HddsTestUtils.java  |  13 +++
 .../hadoop/ozone/TestStorageContainerManager.java  |   5 +-
 .../ozone/om/service/TestRangerBGSyncService.java  |   9 +-
 .../ozone/reconfig/ReconfigurationTestBase.java    |  86 +++++++++++++++
 .../reconfig/TestDatanodeReconfiguration.java      |  24 ++++-
 .../ozone/reconfig/TestOmReconfiguration.java      |  69 ++++++++++++
 .../hadoop/ozone/reconfig/TestOmReconfigure.java   | 120 ---------------------
 .../ozone/reconfig/TestScmReconfiguration.java     |  56 ++++++++++
 .../hadoop/ozone/reconfig/TestScmReconfigure.java  | 101 -----------------
 .../hadoop/ozone/shell/TestReconfigShell.java      |  44 ++++----
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  11 +-
 .../apache/hadoop/ozone/om/OmMetadataReader.java   |   6 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  92 +++-------------
 23 files changed, 560 insertions(+), 530 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java
index c18f877225..e26610b357 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java
@@ -18,15 +18,12 @@
 
 package org.apache.hadoop.ozone;
 
-import com.google.common.collect.Lists;
 import com.google.protobuf.BlockingService;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
-import org.apache.hadoop.conf.ReconfigurationTaskStatus;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.ReconfigureProtocol;
 import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos;
 import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB;
 import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolServerSideTranslatorPB;
@@ -35,15 +32,11 @@ import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
 import org.apache.hadoop.hdds.utils.VersionInfo;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.Collection;
-import java.util.List;
 
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_HANDLER_COUNT_DEFAULT;
@@ -54,29 +47,25 @@ import static 
org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.CLIENT_R
 /**
  * The RPC server that listens to requests from clients.
  */
-public class HddsDatanodeClientProtocolServer extends ServiceRuntimeInfoImpl
-    implements ReconfigureProtocol {
+public class HddsDatanodeClientProtocolServer extends ServiceRuntimeInfoImpl {
   private static final Logger LOG =
       LoggerFactory.getLogger(HddsDatanodeClientProtocolServer.class);
   private final RPC.Server rpcServer;
-  private InetSocketAddress clientRpcAddress;
-  private final DatanodeDetails datanodeDetails;
-  private final HddsDatanodeService service;
+  private final InetSocketAddress clientRpcAddress;
   private final OzoneConfiguration conf;
 
-  protected HddsDatanodeClientProtocolServer(HddsDatanodeService service,
+  protected HddsDatanodeClientProtocolServer(
       DatanodeDetails datanodeDetails, OzoneConfiguration conf,
-      VersionInfo versionInfo) throws IOException {
+      VersionInfo versionInfo, ReconfigurationHandler reconfigurationHandler
+  ) throws IOException {
     super(versionInfo);
-    this.datanodeDetails = datanodeDetails;
-    this.service = service;
     this.conf = conf;
 
-    rpcServer = getRpcServer(conf);
+    rpcServer = getRpcServer(conf, reconfigurationHandler);
     clientRpcAddress = ServerUtils.updateRPCListenAddress(this.conf,
         HDDS_DATANODE_CLIENT_ADDRESS_KEY,
         HddsUtils.getDatanodeRpcAddress(conf), rpcServer);
-    this.datanodeDetails.setPort(CLIENT_RPC, clientRpcAddress.getPort());
+    datanodeDetails.setPort(CLIENT_RPC, clientRpcAddress.getPort());
   }
 
   public void start() {
@@ -98,57 +87,12 @@ public class HddsDatanodeClientProtocolServer extends 
ServiceRuntimeInfoImpl
     getClientRpcServer().join();
   }
 
-  @Override
-  public String getServerName() {
-    return "Datanode";
-  }
-
-  @Override
-  public void startReconfigure() throws IOException {
-    service.checkAdminUserPrivilege(getRemoteUser());
-    startReconfigurationTask();
-  }
-
-  @Override
-  public ReconfigurationTaskStatus getReconfigureStatus() throws IOException {
-    service.checkAdminUserPrivilege(getRemoteUser());
-    return getReconfigurationTaskStatus();
-  }
-
-  @Override
-  public List<String> listReconfigureProperties() throws IOException {
-    service.checkAdminUserPrivilege(getRemoteUser());
-    return Lists.newArrayList(service.getReconfigurableProperties());
-  }
-
-  // optimize ugi lookup for RPC operations to avoid a trip through
-  // UGI.getCurrentUser which is synch'ed
-  private static UserGroupInformation getRemoteUser() throws IOException {
-    UserGroupInformation ugi = Server.getRemoteUser();
-    return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
-  }
-
-  @Override
-  public Configuration getConf() {
-    return conf;
-  }
-
-  @Override
-  public Collection<String> getReconfigurableProperties() {
-    return service.getReconfigurableProperties();
-  }
-
-  @Override
-  public String reconfigurePropertyImpl(String property, String newVal)
-      throws ReconfigurationException {
-    return service.reconfigurePropertyImpl(property, newVal);
-  }
-
   /**
    * Creates a new instance of rpc server. If an earlier instance is already
    * running then returns the same.
    */
-  private RPC.Server getRpcServer(OzoneConfiguration configuration)
+  private RPC.Server getRpcServer(OzoneConfiguration configuration,
+      ReconfigurationHandler reconfigurationHandler)
       throws IOException {
     InetSocketAddress rpcAddress = HddsUtils.getDatanodeRpcAddress(conf);
     // Add reconfigureProtocolService.
@@ -158,7 +102,7 @@ public class HddsDatanodeClientProtocolServer extends 
ServiceRuntimeInfoImpl
     final int handlerCount = conf.getInt(HDDS_DATANODE_HANDLER_COUNT_KEY,
         HDDS_DATANODE_HANDLER_COUNT_DEFAULT);
     ReconfigureProtocolServerSideTranslatorPB reconfigureServerProtocol
-        = new ReconfigureProtocolServerSideTranslatorPB(this);
+        = new ReconfigureProtocolServerSideTranslatorPB(reconfigurationHandler);
     BlockingService reconfigureService = ReconfigureProtocolProtos
         .ReconfigureProtocolService.newReflectiveBlockingService(
             reconfigureServerProtocol);
@@ -201,9 +145,4 @@ public class HddsDatanodeClientProtocolServer extends 
ServiceRuntimeInfoImpl
   public InetSocketAddress getClientRpcAddress() {
     return clientRpcAddress;
   }
-
-  @Override
-  public void close() throws IOException {
-    stop();
-  }
 }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 7edb8d1836..c438779af5 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -22,18 +22,14 @@ import java.io.File;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.SortedSet;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import com.google.common.collect.ImmutableSortedSet;
 import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.DatanodeVersion;
 import org.apache.hadoop.hdds.HddsUtils;
@@ -41,6 +37,7 @@ import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.cli.GenericCli;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
 import org.apache.hadoop.hdds.datanode.metadata.DatanodeCRLStore;
 import org.apache.hadoop.hdds.datanode.metadata.DatanodeCRLStoreImpl;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -76,6 +73,7 @@ import com.google.common.base.Preconditions;
 
 import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.HTTP;
 import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.HTTPS;
+import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
 import static 
org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY;
 import static org.apache.hadoop.ozone.common.Storage.StorageState.INITIALIZED;
@@ -117,9 +115,8 @@ public class HddsDatanodeService extends GenericCli 
implements ServicePlugin {
   private ObjectName dnInfoBeanName;
   private DatanodeCRLStore dnCRLStore;
   private HddsDatanodeClientProtocolServer clientProtocolServer;
-  private final SortedSet<String> reconfigurableProperties =
-      ImmutableSortedSet.of();
   private OzoneAdmins admins;
+  private ReconfigurationHandler reconfigurationHandler;
 
   //Constructor for DataNode PluginService
   public HddsDatanodeService() { }
@@ -321,8 +318,12 @@ public class HddsDatanodeService extends GenericCli 
implements ServicePlugin {
         LOG.error("HttpServer failed to start.", ex);
       }
 
+      reconfigurationHandler =
+          new ReconfigurationHandler("DN", conf, this::checkAdminPrivilege);
+
       clientProtocolServer = new HddsDatanodeClientProtocolServer(
-          this, datanodeDetails, conf, HddsVersionInfo.HDDS_VERSION_INFO);
+          datanodeDetails, conf, HddsVersionInfo.HDDS_VERSION_INFO,
+          reconfigurationHandler);
 
       // Get admin list
       String starterUser =
@@ -660,18 +661,14 @@ public class HddsDatanodeService extends GenericCli 
implements ServicePlugin {
   /**
    * Check ozone admin privilege, throws exception if not admin.
    */
-  public void checkAdminUserPrivilege(UserGroupInformation ugi)
+  private void checkAdminPrivilege(String operation)
       throws IOException {
+    final UserGroupInformation ugi = getRemoteUser();
     admins.checkAdminUserPrivilege(ugi);
   }
 
-  public String reconfigurePropertyImpl(String property, String newVal)
-      throws ReconfigurationException {
-    return "";
-  }
-
-  public Collection<String> getReconfigurableProperties() {
-    return reconfigurableProperties;
+  @VisibleForTesting
+  public ReconfigurationHandler getReconfigurationHandler() {
+    return reconfigurationHandler;
   }
-
 }
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/ReconfigurationHandler.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/ReconfigurationHandler.java
new file mode 100644
index 0000000000..8eae49d083
--- /dev/null
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/ReconfigurationHandler.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurableBase;
+import org.apache.hadoop.conf.ReconfigurationTaskStatus;
+import org.apache.hadoop.hdds.protocol.ReconfigureProtocol;
+import org.apache.ratis.util.function.CheckedConsumer;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.UnaryOperator;
+
+import static java.util.Collections.unmodifiableSet;
+import static java.util.function.UnaryOperator.identity;
+
+/**
+ * Keeps track of reconfigurable properties and the corresponding functions
+ * that implement reconfiguration.
+ */
+public class ReconfigurationHandler extends ReconfigurableBase
+    implements ReconfigureProtocol {
+
+  private final String name;
+  private final CheckedConsumer<String, IOException> requireAdminPrivilege;
+  private final Map<String, UnaryOperator<String>> properties =
+      new ConcurrentHashMap<>();
+
+  public ReconfigurationHandler(String name, OzoneConfiguration config,
+      CheckedConsumer<String, IOException> requireAdminPrivilege) {
+    this.name = name;
+    this.requireAdminPrivilege = requireAdminPrivilege;
+    setConf(config);
+  }
+
+  public ReconfigurationHandler register(
+      String property, UnaryOperator<String> reconfigureFunction) {
+    properties.put(property, reconfigureFunction);
+    return this;
+  }
+
+  @Override
+  protected Configuration getNewConf() {
+    return new OzoneConfiguration();
+  }
+
+  @Override
+  public Set<String> getReconfigurableProperties() {
+    return unmodifiableSet(properties.keySet());
+  }
+
+  @Override
+  public String reconfigurePropertyImpl(String property, String newValue) {
+    return properties.getOrDefault(property, identity())
+        .apply(newValue);
+  }
+
+  @Override
+  public String getServerName() {
+    return name;
+  }
+
+  @Override
+  public void startReconfigure() throws IOException {
+    requireAdminPrivilege.accept("startReconfiguration");
+    startReconfigurationTask();
+  }
+
+  @Override
+  public ReconfigurationTaskStatus getReconfigureStatus() throws IOException {
+    requireAdminPrivilege.accept("getReconfigurationStatus");
+    return getReconfigurationTaskStatus();
+  }
+
+  @Override
+  public List<String> listReconfigureProperties() throws IOException {
+    requireAdminPrivilege.accept("listReconfigurableProperties");
+    return new ArrayList<>(new TreeSet<>(getReconfigurableProperties()));
+  }
+
+  @Override
+  public void close() throws IOException {
+    shutdownReconfigurationTask();
+  }
+}
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
index 64d21d19ea..4c3c4aa13c 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -6,13 +6,16 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p>
+ *
  * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+/**
+ * Contains configuration-related classes.
+ */
 package org.apache.hadoop.hdds.conf;
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java
index 42d63dffe0..987f4aee03 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java
@@ -17,20 +17,13 @@
 
 package org.apache.hadoop.hdds.server;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurableBase;
-import org.apache.hadoop.conf.ReconfigurationException;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.VersionInfo;
 
-import java.util.Collection;
-
 /**
  * Helper base class to report the standard version and runtime information.
  *
  */
-public class ServiceRuntimeInfoImpl extends ReconfigurableBase
-    implements ServiceRuntimeInfo {
+public class ServiceRuntimeInfoImpl implements ServiceRuntimeInfo {
 
   private long startedTimeInMillis;
   private final VersionInfo versionInfo;
@@ -63,20 +56,4 @@ public class ServiceRuntimeInfoImpl extends 
ReconfigurableBase
   public void setStartTime() {
     startedTimeInMillis = System.currentTimeMillis();
   }
-
-  @Override
-  protected Configuration getNewConf() {
-    return new OzoneConfiguration();
-  }
-
-  @Override
-  public Collection<String> getReconfigurableProperties() {
-    return null;
-  }
-
-  @Override
-  protected String reconfigurePropertyImpl(String property, String newVal)
-      throws ReconfigurationException {
-    throw new ReconfigurationException();
-  }
 }
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index 0f0d40e3ba..7e9cf31cd4 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -572,6 +573,13 @@ public final class HddsServerUtil {
     archiveOutputStream.closeArchiveEntry();
   }
 
+  // optimize ugi lookup for RPC operations to avoid a trip through
+  // UGI.getCurrentUser which is synch'ed
+  public static UserGroupInformation getRemoteUser() throws IOException {
+    UserGroupInformation ugi = Server.getRemoteUser();
+    return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
+  }
+
   /**
    * Converts RocksDB exception to IOE.
    * @param msg  - Message to add to exception.
diff --git 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/TestReconfigurationHandler.java
 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/TestReconfigurationHandler.java
new file mode 100644
index 0000000000..1166eee33d
--- /dev/null
+++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/TestReconfigurationHandler.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import org.apache.ratis.util.function.CheckedConsumer;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Stream;
+
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toSet;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+/**
+ * Test for {@link ReconfigurationHandler}.
+ */
+class TestReconfigurationHandler {
+
+  private static final String PROP_A = "some.test.property";
+  private static final String PROP_B = "other.property";
+  private static final CheckedConsumer<String, IOException> ACCEPT = any -> { };
+  private static final CheckedConsumer<String, IOException> DENY = any -> {
+    throw new IOException("access denied");
+  };
+
+  private final AtomicReference<String> refA =
+      new AtomicReference<>("oldA");
+  private final AtomicReference<String> refB =
+      new AtomicReference<>("oldB");
+  private final AtomicReference<CheckedConsumer<String, IOException>> adminCheck
+      = new AtomicReference<>(ACCEPT);
+
+  private final ReconfigurationHandler subject = new ReconfigurationHandler(
+      "test", new OzoneConfiguration(), op -> adminCheck.get().accept(op))
+              .register(PROP_A, refA::getAndSet)
+              .register(PROP_B, refB::getAndSet);
+
+  @Test
+  void getProperties() {
+    assertEquals(Stream.of(PROP_A, PROP_B).collect(toSet()),
+        subject.getReconfigurableProperties());
+  }
+
+  @Test
+  void listProperties() throws IOException {
+    assertEquals(Stream.of(PROP_A, PROP_B).sorted().collect(toList()),
+        subject.listReconfigureProperties());
+  }
+
+  @Test
+  void callsReconfigurationFunction() {
+    subject.reconfigurePropertyImpl(PROP_A, "newA");
+    assertEquals("newA", refA.get());
+
+    subject.reconfigurePropertyImpl(PROP_B, "newB");
+    assertEquals("newB", refB.get());
+  }
+
+  @Test
+  void ignoresUnknownProperty() {
+    assertDoesNotThrow(() ->
+        subject.reconfigurePropertyImpl("foobar", "some value"));
+  }
+
+  @Test
+  void requiresAdminAccess() {
+    adminCheck.set(DENY);
+    assertThrows(IOException.class, subject::listReconfigureProperties);
+    assertThrows(IOException.class, subject::startReconfigure);
+    assertThrows(IOException.class, subject::getReconfigureStatus);
+  }
+
+}
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
similarity index 93%
copy from 
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
copy to 
hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
index 64d21d19ea..e19d8de013 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
+++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -6,13 +6,16 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p>
+ *
  * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+/**
+ * Contains configuration-related tests.
+ */
 package org.apache.hadoop.hdds.conf;
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 0e6aecab18..292597bbac 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -24,17 +24,15 @@ package org.apache.hadoop.hdds.scm.server;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.protobuf.BlockingService;
 import com.google.protobuf.ProtocolMessageEnum;
 import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.conf.ReconfigurationTaskStatus;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.ReconfigureProtocol;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import 
org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.ReconfigureProtocolService;
 import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
@@ -120,12 +118,13 @@ import static 
org.apache.hadoop.hdds.scm.ha.HASecurityUtils.createSCMRatisTLSCon
 import static 
org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer;
 import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName;
 import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
+import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser;
 
 /**
  * The RPC server that listens to requests from clients.
  */
 public class SCMClientProtocolServer implements
-    StorageContainerLocationProtocol, ReconfigureProtocol, Auditor {
+    StorageContainerLocationProtocol, Auditor {
   private static final Logger LOG =
       LoggerFactory.getLogger(SCMClientProtocolServer.class);
   private static final AuditLogger AUDIT =
@@ -135,8 +134,11 @@ public class SCMClientProtocolServer implements
   private final StorageContainerManager scm;
   private final ProtocolMessageMetrics<ProtocolMessageEnum> protocolMetrics;
 
-  public SCMClientProtocolServer(OzoneConfiguration conf,
-      StorageContainerManager scm) throws IOException {
+  public SCMClientProtocolServer(
+      OzoneConfiguration conf,
+      StorageContainerManager scm,
+      ReconfigurationHandler reconfigurationHandler
+  ) throws IOException {
     this.scm = scm;
     final int handlerCount =
         conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY,
@@ -168,7 +170,7 @@ public class SCMClientProtocolServer implements
 
     // Add reconfigureProtocolService.
     ReconfigureProtocolServerSideTranslatorPB reconfigureServerProtocol
-        = new ReconfigureProtocolServerSideTranslatorPB(this);
+        = new ReconfigureProtocolServerSideTranslatorPB(reconfigurationHandler);
     BlockingService reconfigureService =
         ReconfigureProtocolService.newReflectiveBlockingService(
             reconfigureServerProtocol);
@@ -218,9 +220,6 @@ public class SCMClientProtocolServer implements
     getClientRpcServer().join();
   }
 
-  public UserGroupInformation getRemoteUser() {
-    return Server.getRemoteUser();
-  }
   @Override
   public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType
       replicationType, HddsProtos.ReplicationFactor factor,
@@ -1299,29 +1298,6 @@ public class SCMClientProtocolServer implements
         .build();
   }
 
-  @Override
-  public String getServerName() throws IOException {
-    return "SCM";
-  }
-
-  @Override
-  public void startReconfigure() throws IOException {
-    getScm().checkAdminAccess(getRemoteUser());
-    getScm().startReconfigurationTask();
-  }
-
-  @Override
-  public ReconfigurationTaskStatus getReconfigureStatus() throws IOException {
-    getScm().checkAdminAccess(getRemoteUser());
-    return getScm().getReconfigurationTaskStatus();
-  }
-
-  @Override
-  public List<String> listReconfigureProperties() throws IOException {
-    getScm().checkAdminAccess(getRemoteUser());
-    return Lists.newArrayList(getScm().getReconfigurableProperties());
-  }
-
   @Override
   public void close() throws IOException {
     stop();
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index f2a5b0844c..3c076c90e4 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -23,20 +23,18 @@ package org.apache.hadoop.hdds.scm.server;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableSortedSet;
 import com.google.protobuf.BlockingService;
 
 import java.time.Duration;
-import java.util.SortedSet;
 import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
@@ -192,6 +190,7 @@ import static 
org.apache.hadoop.hdds.ratis.RatisHelper.newJvmPauseMonitor;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_REPORT_EXEC_WAIT_THRESHOLD_DEFAULT;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_REPORT_QUEUE_WAIT_THRESHOLD_DEFAULT;
 import static 
org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore.CertType.VALID_CERTS;
+import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
 import static org.apache.hadoop.ozone.OzoneConsts.CRL_SEQUENCE_ID_KEY;
 import static org.apache.hadoop.ozone.OzoneConsts.SCM_SUB_CA_PREFIX;
@@ -252,6 +251,7 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 
   private final EventQueue eventQueue;
   private final SCMServiceManager serviceManager;
+  private final ReconfigurationHandler reconfigurationHandler;
 
   /*
    * HTTP endpoint for JMX access.
@@ -304,12 +304,6 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
   private ContainerReplicaPendingOps containerReplicaPendingOps;
   private final AtomicBoolean isStopped = new AtomicBoolean(false);
 
-  /** A list of property that are reconfigurable at runtime. */
-  private final SortedSet<String> reconfigurableProperties =
-      ImmutableSortedSet.of(
-          OZONE_ADMINISTRATORS
-      );
-
   private Clock systemClock;
 
   /**
@@ -403,10 +397,15 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
     scmAdmins = OzoneAdmins.getOzoneAdmins(scmStarterUser, conf);
     LOG.info("SCM start with adminUsers: {}", scmAdmins.getAdminUsernames());
 
+    reconfigurationHandler =
+        new ReconfigurationHandler("SCM", conf, this::checkAdminAccess)
+            .register(OZONE_ADMINISTRATORS, this::reconfOzoneAdmins);
+
     datanodeProtocolServer = new SCMDatanodeProtocolServer(conf, this,
         eventQueue, scmContext);
     blockProtocolServer = new SCMBlockProtocolServer(conf, this);
-    clientProtocolServer = new SCMClientProtocolServer(conf, this);
+    clientProtocolServer = new SCMClientProtocolServer(conf, this,
+        reconfigurationHandler);
 
     initializeEventHandlers();
 
@@ -422,7 +421,6 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
     registerMetricsSource(this);
   }
 
-
   private void initializeEventHandlers() {
     long timeDuration = configuration.getTimeDuration(
         OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION,
@@ -1839,6 +1837,10 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
     }
   }
 
+  private void checkAdminAccess(String op) throws IOException {
+    checkAdminAccess(getRemoteUser());
+  }
+
   public void checkAdminAccess(UserGroupInformation remoteUser)
       throws IOException {
     if (remoteUser != null && !scmAdmins.isAdmin(remoteUser)) {
@@ -2085,28 +2087,6 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
     return scmAdmins.getAdminUsernames();
   }
 
-  // ReconfigurableBase get base configuration
-  @Override
-  public Configuration getConf() {
-    return getConfiguration();
-  }
-
-  @Override // ReconfigurableBase
-  public Collection<String> getReconfigurableProperties() {
-    return reconfigurableProperties;
-  }
-
-  @Override // ReconfigurableBase
-  public String reconfigurePropertyImpl(String property, String newVal)
-      throws ReconfigurationException {
-    if (property.equals(OZONE_ADMINISTRATORS)) {
-      return reconfOzoneAdmins(newVal);
-    } else {
-      throw new ReconfigurationException(property, newVal,
-          getConfiguration().get(property));
-    }
-  }
-
   private String reconfOzoneAdmins(String newVal) {
     getConfiguration().set(OZONE_ADMINISTRATORS, newVal);
     Collection<String> admins = OzoneAdmins.getOzoneAdminsFromConfig(
@@ -2164,4 +2144,9 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 
     scmHAMetrics = SCMHAMetrics.create(getScmId(), leaderId);
   }
+
+  @VisibleForTesting
+  public ReconfigurationHandler getReconfigurationHandler() {
+    return reconfigurationHandler;
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
index eb0741662c..a577427975 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
@@ -74,10 +74,13 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.Storage;
 import 
org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client
     .AuthenticationException;
 
@@ -91,6 +94,9 @@ import java.util.UUID;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeoutException;
 
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
 /**
  * Stateless helper functions for Hdds tests.
  */
@@ -868,4 +874,11 @@ public final class HddsTestUtils {
                     .setReplicaIndex(replicaIndex)
                     .build();
   }
+
+  public static void mockRemoteUser(UserGroupInformation ugi) {
+    Server.Call call = spy(new Server.Call(1, 1, null, null,
+        RPC.RpcKind.RPC_BUILTIN, new byte[] {1, 2, 3}));
+    when(call.getRemoteUser()).thenReturn(ugi);
+    Server.getCurCall().set(call);
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index ad16db7628..e8e09c21f2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -128,11 +128,11 @@ import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERV
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION;
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
 import static 
org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils.setInternalState;
+import static org.apache.hadoop.hdds.scm.HddsTestUtils.mockRemoteUser;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 import static org.mockito.Mockito.verify;
 
 /**
@@ -197,8 +197,7 @@ public class TestStorageContainerManager {
       SCMClientProtocolServer mockClientServer = Mockito.spy(
           cluster.getStorageContainerManager().getClientProtocolServer());
 
-      when(mockClientServer.getRemoteUser()).thenReturn(
-          UserGroupInformation.createRemoteUser(fakeRemoteUsername));
+      
mockRemoteUser(UserGroupInformation.createRemoteUser(fakeRemoteUsername));
 
       try {
         mockClientServer.deleteContainer(
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
index ead4e60973..dd35b06642 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.ozone.om.service;
 
+import static org.apache.hadoop.hdds.scm.HddsTestUtils.mockRemoteUser;
 import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_PASSWD;
 import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_USER;
 import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_HTTPS_ADDRESS_KEY;
@@ -27,7 +28,6 @@ import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.framework;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
 import java.io.File;
@@ -45,8 +45,6 @@ import java.util.concurrent.TimeUnit;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.AuditMessage;
@@ -183,11 +181,8 @@ public class TestRangerBGSyncService {
 
     ozoneManager = mock(OzoneManager.class);
 
-    Server.Call call = spy(new Server.Call(1, 1, null, null,
-        RPC.RpcKind.RPC_BUILTIN, new byte[] {1, 2, 3}));
     // Run as alice, so that Server.getRemoteUser() won't return null.
-    when(call.getRemoteUser()).thenReturn(ugiAlice);
-    Server.getCurCall().set(call);
+    mockRemoteUser(ugiAlice);
 
     String omID = UUID.randomUUID().toString();
     final String path = GenericTestUtils.getTempPath(omID);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/ReconfigurationTestBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/ReconfigurationTestBase.java
new file mode 100644
index 0000000000..e176f64282
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/ReconfigurationTestBase.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.reconfig;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.Timeout;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Set;
+import java.util.TreeSet;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
+
+/**
+ * Tests for Reconfiguration.
+ */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+@Timeout(300)
+abstract class ReconfigurationTestBase {
+
+  private MiniOzoneCluster cluster;
+  private String currentUser;
+
+  @BeforeAll
+  void setup() throws Exception {
+    cluster = MiniOzoneCluster.newBuilder(new OzoneConfiguration())
+        .build();
+    cluster.waitForClusterToBeReady();
+    currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
+  }
+
+  @AfterAll
+  void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  abstract ReconfigurationHandler getSubject();
+
+  MiniOzoneCluster getCluster() {
+    return cluster;
+  }
+
+  String getCurrentUser() {
+    return currentUser;
+  }
+
+  static void assertProperties(ReconfigurationHandler subject,
+      Set<String> expected) {
+
+    assertEquals(expected, subject.getReconfigurableProperties());
+
+    try {
+      assertEquals(new ArrayList<>(new TreeSet<>(expected)),
+          subject.listReconfigureProperties());
+    } catch (IOException e) {
+      fail("Unexpected exception", e);
+    }
+  }
+
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestDatanodeReconfiguration.java
similarity index 59%
copy from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
copy to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestDatanodeReconfiguration.java
index 64d21d19ea..7a26832aaf 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestDatanodeReconfiguration.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,4 +15,24 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdds.conf;
+package org.apache.hadoop.ozone.reconfig;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
+import org.junit.jupiter.api.Test;
+
+/**
+ * Tests for Datanode reconfiguration.
+ */
+class TestDatanodeReconfiguration extends ReconfigurationTestBase {
+  @Override
+  ReconfigurationHandler getSubject() {
+    return getCluster().getHddsDatanodes().get(0).getReconfigurationHandler();
+  }
+
+  @Test
+  void reconfigurableProperties() {
+    assertProperties(getSubject(), ImmutableSet.of());
+  }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestOmReconfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestOmReconfiguration.java
new file mode 100644
index 0000000000..daa9e65467
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestOmReconfiguration.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.reconfig;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
+import org.junit.jupiter.api.Test;
+
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
+import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * Tests for OM reconfiguration.
+ */
+class TestOmReconfiguration extends ReconfigurationTestBase {
+
+  @Override
+  ReconfigurationHandler getSubject() {
+    return getCluster().getOzoneManager().getReconfigurationHandler();
+  }
+
+  @Test
+  void reconfigurableProperties() {
+    assertProperties(getSubject(),
+        ImmutableSet.of(OZONE_ADMINISTRATORS, OZONE_READONLY_ADMINISTRATORS));
+  }
+
+  @Test
+  void adminUsernames() {
+    final String newValue = randomAlphabetic(10);
+
+    getSubject().reconfigurePropertyImpl(OZONE_ADMINISTRATORS, newValue);
+
+    assertEquals(
+        ImmutableSet.of(newValue, getCurrentUser()),
+        getCluster().getOzoneManager().getOmAdminUsernames());
+  }
+
+  @Test
+  void readOnlyAdmins() {
+    final String newValue = randomAlphabetic(10);
+
+    getSubject().reconfigurePropertyImpl(OZONE_READONLY_ADMINISTRATORS,
+        newValue);
+
+    assertEquals(
+        ImmutableSet.of(newValue),
+        getCluster().getOzoneManager().getOmReadOnlyAdminUsernames());
+  }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestOmReconfigure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestOmReconfigure.java
deleted file mode 100644
index 2488f309fa..0000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestOmReconfigure.java
+++ /dev/null
@@ -1,120 +0,0 @@
-package org.apache.hadoop.ozone.reconfig;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- *
- */
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
-import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import java.util.UUID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-/**
- * Tests for OM Reconfigure.
- */
-public class TestOmReconfigure {
-
-  /**
-   * Set a timeout for each test.
-   */
-  @Rule
-  public Timeout timeout = new Timeout(300000);
-  private OzoneConfiguration conf;
-  private MiniOzoneHAClusterImpl cluster;
-  private OzoneManager ozoneManager;
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   */
-  @Before
-  public void setup() throws Exception {
-
-    conf = new OzoneConfiguration();
-    String omServiceId = UUID.randomUUID().toString();
-    cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
-        .setClusterId(UUID.randomUUID().toString())
-        .setScmId(UUID.randomUUID().toString())
-        .setOMServiceId(omServiceId)
-        .setNumOfOzoneManagers(3)
-        .setNumDatanodes(1)
-        .build();
-
-    cluster.waitForClusterToBeReady();
-    ozoneManager = cluster.getOzoneManager();
-
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  /**
-   * Test reconfigure om "ozone.administrators".
-   */
-  @Test
-  public void testOmAdminUsersReconfigure() throws Exception {
-    String userA = "mockUserA";
-    String userB = "mockUserB";
-    conf.set(OZONE_ADMINISTRATORS, userA);
-    ozoneManager.reconfigurePropertyImpl(OZONE_ADMINISTRATORS, userA);
-    assertTrue(userA + " should be an admin user",
-        ozoneManager.getOmAdminUsernames().contains(userA));
-
-    conf.set(OZONE_ADMINISTRATORS, userB);
-    ozoneManager.reconfigurePropertyImpl(OZONE_ADMINISTRATORS, userB);
-    assertFalse(userA + " should NOT be an admin user",
-        ozoneManager.getOmAdminUsernames().contains(userA));
-    assertTrue(userB + " should be an admin user",
-        ozoneManager.getOmAdminUsernames().contains(userB));
-  }
-
-  /**
-   * Test reconfigure om "ozone.readonly.administrators".
-   */
-  @Test
-  public void testOmReadOnlyUsersReconfigure() throws Exception {
-    String userA = "mockUserA";
-    String userB = "mockUserB";
-    conf.set(OZONE_READONLY_ADMINISTRATORS, userA);
-    ozoneManager.reconfigurePropertyImpl(OZONE_READONLY_ADMINISTRATORS, userA);
-    assertTrue(userA + " should be a readOnly admin user",
-        ozoneManager.getOmReadOnlyAdminUsernames().contains(userA));
-
-    conf.set(OZONE_READONLY_ADMINISTRATORS, userB);
-    ozoneManager.reconfigurePropertyImpl(OZONE_READONLY_ADMINISTRATORS, userB);
-    assertFalse(userA + " should NOT be a admin user",
-        ozoneManager.getOmReadOnlyAdminUsernames().contains(userA));
-    assertTrue(userB + " should be a admin user",
-        ozoneManager.getOmReadOnlyAdminUsernames().contains(userB));
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestScmReconfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestScmReconfiguration.java
new file mode 100644
index 0000000000..84045325dc
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestScmReconfiguration.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.reconfig;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
+import org.junit.jupiter.api.Test;
+
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * Tests for SCM reconfiguration.
+ */
+class TestScmReconfiguration extends ReconfigurationTestBase {
+
+  @Override
+  ReconfigurationHandler getSubject() {
+    return getCluster().getStorageContainerManager()
+        .getReconfigurationHandler();
+  }
+
+  @Test
+  void reconfigurableProperties() {
+    assertProperties(getSubject(), ImmutableSet.of(OZONE_ADMINISTRATORS));
+  }
+
+  @Test
+  void adminUsernames() {
+    final String newValue = randomAlphabetic(10);
+
+    getSubject().reconfigurePropertyImpl(OZONE_ADMINISTRATORS, newValue);
+
+    assertEquals(
+        ImmutableSet.of(newValue, getCurrentUser()),
+        getCluster().getStorageContainerManager().getScmAdminUsernames());
+  }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestScmReconfigure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestScmReconfigure.java
deleted file mode 100644
index 1e48f3cb30..0000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestScmReconfigure.java
+++ /dev/null
@@ -1,101 +0,0 @@
-package org.apache.hadoop.ozone.reconfig;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- *
- */
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Tests for SCM Reconfigure.
- */
-public class TestScmReconfigure {
-
-  /**
-   * Set a timeout for each test.
-   */
-  @Rule
-  public Timeout timeout = new Timeout(300000);
-  private OzoneConfiguration conf;
-  private MiniOzoneHAClusterImpl cluster;
-  private StorageContainerManager scm;
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   */
-  @Before
-  public void setup() throws Exception {
-
-    conf = new OzoneConfiguration();
-    String omServiceId = UUID.randomUUID().toString();
-    cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
-        .setClusterId(UUID.randomUUID().toString())
-        .setScmId(UUID.randomUUID().toString())
-        .setOMServiceId(omServiceId)
-        .setNumOfOzoneManagers(3)
-        .setNumDatanodes(1)
-        .build();
-
-    cluster.waitForClusterToBeReady();
-    scm = cluster.getStorageContainerManager();
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  /**
-   * Test reconfigure scm "ozone.administrators".
-   */
-  @Test
-  public void testOmAdminUsersReconfigure() throws Exception {
-    String userA = "mockUserA";
-    String userB = "mockUserB";
-    conf.set(OZONE_ADMINISTRATORS, userA);
-    scm.reconfigurePropertyImpl(OZONE_ADMINISTRATORS, userA);
-    assertTrue(userA + " should be an admin user",
-        scm.getScmAdminUsernames().contains(userA));
-
-    conf.set(OZONE_ADMINISTRATORS, userB);
-    scm.reconfigurePropertyImpl(OZONE_ADMINISTRATORS, userB);
-    assertFalse(userA + " should NOT be an admin user",
-        scm.getScmAdminUsernames().contains(userA));
-    assertTrue(userB + " should be an admin user",
-        scm.getScmAdminUsernames().contains(userB));
-  }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java
index 4ab9c96276..849000c580 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java
@@ -23,6 +23,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.UUID;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.ReconfigurableBase;
 import org.apache.hadoop.hdds.cli.OzoneAdmin;
@@ -46,20 +47,22 @@ import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalSt
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
 
 /**
- * * Integration test for Ozone reconfig shell command. HA enabled.
+ * * Integration test for {@code ozone admin reconfig} command. HA enabled.
  */
 public class TestReconfigShell {
+
+  private static final int DATANODE_COUNT = 3;
+
   /**
    * Set a timeout for each test.
    */
   @Rule
-  public Timeout timeout = new Timeout(300000);
+  public Timeout timeout = new Timeout(5, TimeUnit.MINUTES);
   private static MiniOzoneCluster cluster;
   private static List<HddsDatanodeService> datanodeServices;
   private static OzoneAdmin ozoneAdmin;
   private static OzoneManager ozoneManager;
   private static StorageContainerManager storageContainerManager;
-  private static int datanodeCount = 3;
   private static NodeManager nm;
 
 
@@ -76,7 +79,7 @@ public class TestReconfigShell {
         .setOMServiceId(omServiceId)
         .setNumOfOzoneManagers(1)
         .setNumOfStorageContainerManagers(1)
-        .setNumDatanodes(datanodeCount)
+        .setNumDatanodes(DATANODE_COUNT)
         .build();
     cluster.waitForClusterToBeReady();
     ozoneAdmin = new OzoneAdmin(cluster.getConf());
@@ -100,7 +103,8 @@ public class TestReconfigShell {
         HddsDatanodeClientProtocolServer server =
             datanodeService.getClientProtocolServer();
         InetSocketAddress socket = server.getClientRpcAddress();
-        exectureAndAssertProperties(server, socket, capture);
+        executeAndAssertProperties(datanodeService.getReconfigurationHandler(),
+            socket, capture);
       }
     }
   }
@@ -109,20 +113,22 @@ public class TestReconfigShell {
   public void testOzoneManagerGetReconfigurationProperties() throws Exception {
     try (SystemOutCapturer capture = new SystemOutCapturer()) {
       InetSocketAddress socket = ozoneManager.getOmRpcServerAddr();
-      exectureAndAssertProperties(ozoneManager, socket, capture);
+      executeAndAssertProperties(ozoneManager.getReconfigurationHandler(),
+          socket, capture);
     }
   }
 
   @Test
-  public void testStorageContainerManagerrGetReconfigurationProperties()
+  public void testStorageContainerManagerGetReconfigurationProperties()
       throws Exception {
     try (SystemOutCapturer capture = new SystemOutCapturer()) {
       InetSocketAddress socket = storageContainerManager.getClientRpcAddress();
-      exectureAndAssertProperties(storageContainerManager, socket, capture);
+      executeAndAssertProperties(
+          storageContainerManager.getReconfigurationHandler(), socket, 
capture);
     }
   }
 
-  private void exectureAndAssertProperties(
+  private void executeAndAssertProperties(
       ReconfigurableBase reconfigurableBase,
       InetSocketAddress socket, SystemOutCapturer capture)
       throws UnsupportedEncodingException {
@@ -148,15 +154,15 @@ public class TestReconfigShell {
   public void testDatanodeBulkReconfig() throws Exception {
     // All Dn are normal, So All the Dn will be reconfig
     List<HddsDatanodeService> dns = cluster.getHddsDatanodes();
-    Assert.assertEquals(datanodeCount, dns.size());
-    executeAndAssertBulkReconfigCount(datanodeCount);
+    Assert.assertEquals(DATANODE_COUNT, dns.size());
+    executeAndAssertBulkReconfigCount(DATANODE_COUNT);
 
     // Shutdown a Dn, it will not be reconfig,
     // so only (datanodeCount - 1) Dn will be configured successfully
     cluster.shutdownHddsDatanode(0);
-    executeAndAssertBulkReconfigCount(datanodeCount - 1);
+    executeAndAssertBulkReconfigCount(DATANODE_COUNT - 1);
     cluster.restartHddsDatanode(0, true);
-    executeAndAssertBulkReconfigCount(datanodeCount);
+    executeAndAssertBulkReconfigCount(DATANODE_COUNT);
 
     // DECOMMISSIONED a Dn, it will not be reconfig,
     // so only (datanodeCount - 1) Dn will be configured successfully
@@ -164,11 +170,11 @@ public class TestReconfigShell {
     storageContainerManager.getScmDecommissionManager()
         .startDecommission(details);
     nm.setNodeOperationalState(details, DECOMMISSIONED);
-    executeAndAssertBulkReconfigCount(datanodeCount - 1);
+    executeAndAssertBulkReconfigCount(DATANODE_COUNT - 1);
     storageContainerManager.getScmDecommissionManager()
         .recommission(details);
     nm.setNodeOperationalState(details, IN_SERVICE);
-    executeAndAssertBulkReconfigCount(datanodeCount);
+    executeAndAssertBulkReconfigCount(DATANODE_COUNT);
   }
 
   private void executeAndAssertBulkReconfigCount(int except)
@@ -184,12 +190,4 @@ public class TestReconfigShell {
               String.format("successfully %d", except)));
     }
   }
-
-
-  static class TestOzoneAdmin extends OzoneAdmin {
-    @Override
-    public OzoneConfiguration createOzoneConfiguration() {
-      return super.createOzoneConfiguration();
-    }
-  }
 }
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index f4fc52fc12..9775d36c98 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -58,7 +58,6 @@ import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.common.BlockGroup;
@@ -93,7 +92,6 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.RequestContext;
 import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -106,6 +104,7 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ;
+import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
@@ -330,14 +329,6 @@ public class KeyManagerImpl implements KeyManager {
     return metadataManager.getBucketTable().get(bucketKey);
   }
 
-  /* Optimize ugi lookup for RPC operations to avoid a trip through
-   * UGI.getCurrentUser which is synch'ed.
-   */
-  public static UserGroupInformation getRemoteUser() throws IOException {
-    UserGroupInformation ugi = Server.getRemoteUser();
-    return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
-  }
-
   private EncryptedKeyVersion generateEDEK(
       final String ezKeyName) throws IOException {
     if (ezKeyName == null) {
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java
index d8c0ac0e7d..2b97e67798 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java
@@ -47,11 +47,11 @@ import java.util.List;
 import java.util.Map;
 
 import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName;
+import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE_DEFAULT;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE_MAX;
-import static org.apache.hadoop.ozone.om.KeyManagerImpl.getRemoteUser;
 import static org.apache.hadoop.ozone.om.OzoneManager.getS3Auth;
 import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -130,7 +130,6 @@ public class OmMetadataReader implements IOmMetadataReader, 
Auditor {
    *
    * @param args - attributes of the key.
    * @return OmKeyInfo - the info about the requested key.
-   * @throws IOException
    */
   @Override
   public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
@@ -523,9 +522,6 @@ public class OmMetadataReader implements IOmMetadataReader, 
Auditor {
    * Looks up the configuration to see if there is custom class specified.
    * Constructs the instance by passing the configuration directly to the
    * constructor to achieve thread safety using final fields.
-   *
-   * @param conf
-   * @return IAccessAuthorizer
    */
   private IAccessAuthorizer getACLAuthorizerInstance(OzoneConfiguration conf) {
     Class<? extends IAccessAuthorizer> clazz = conf.getClass(
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 1138420918..e07391bbfc 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -44,7 +44,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
-import java.util.SortedSet;
 import java.util.Timer;
 import java.util.TimerTask;
 import java.util.concurrent.ConcurrentHashMap;
@@ -52,11 +51,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
 import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableSortedSet;
-import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
-import org.apache.hadoop.conf.ReconfigurationTaskStatus;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
@@ -73,7 +68,7 @@ import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.ConfigurationException;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.ReconfigureProtocol;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import 
org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.ReconfigureProtocolService;
 import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB;
@@ -233,6 +228,7 @@ import static 
org.apache.hadoop.hdds.HddsUtils.preserveThreadName;
 import static org.apache.hadoop.hdds.ratis.RatisHelper.newJvmPauseMonitor;
 import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
 import static org.apache.hadoop.hdds.utils.HAUtils.getScmInfo;
+import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser;
 import static org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID;
 import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
@@ -317,8 +313,7 @@ import org.slf4j.LoggerFactory;
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
 public final class OzoneManager extends ServiceRuntimeInfoImpl
-    implements OzoneManagerProtocol, OMInterServiceProtocol,
-    ReconfigureProtocol, OMMXBean, Auditor {
+    implements OzoneManagerProtocol, OMInterServiceProtocol, OMMXBean, Auditor 
{
   public static final Logger LOG =
       LoggerFactory.getLogger(OzoneManager.class);
 
@@ -333,6 +328,8 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
       new ThreadLocal<>();
 
   private static boolean securityEnabled = false;
+
+  private final ReconfigurationHandler reconfigurationHandler;
   private OzoneDelegationTokenSecretManager delegationTokenMgr;
   private OzoneBlockTokenSecretManager blockTokenMgr;
   private CertificateClient certClient;
@@ -466,13 +463,6 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
   private OmMetadataReader omMetadataReader;
   private OmSnapshotManager omSnapshotManager;
 
-  /** A list of property that are reconfigurable at runtime. */
-  private final SortedSet<String> reconfigurableProperties =
-      ImmutableSortedSet.of(
-          OZONE_ADMINISTRATORS,
-          OZONE_READONLY_ADMINISTRATORS
-      );
-
   @SuppressWarnings("methodlength")
   private OzoneManager(OzoneConfiguration conf, StartupOption startupOption)
       throws IOException, AuthenticationException {
@@ -490,6 +480,11 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
     omStorage = new OMStorage(conf);
     omStorage.validateOrPersistOmNodeId(omNodeDetails.getNodeId());
     omId = omStorage.getOmId();
+    reconfigurationHandler =
+        new ReconfigurationHandler("OM", conf, this::checkAdminUserPrivilege)
+            .register(OZONE_ADMINISTRATORS, this::reconfOzoneAdmins)
+            .register(OZONE_READONLY_ADMINISTRATORS,
+                this::reconfOzoneReadOnlyAdmins);
 
     versionManager = new OMLayoutVersionManager(omStorage.getLayoutVersion());
     upgradeFinalizer = new OMUpgradeFinalizer(versionManager);
@@ -1203,7 +1198,7 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
             omMetadataServerProtocol);
 
     ReconfigureProtocolServerSideTranslatorPB reconfigureServerProtocol
-        = new ReconfigureProtocolServerSideTranslatorPB(this);
+        = new 
ReconfigureProtocolServerSideTranslatorPB(reconfigurationHandler);
     BlockingService reconfigureService =
         ReconfigureProtocolService.newReflectiveBlockingService(
             reconfigureServerProtocol);
@@ -2292,13 +2287,6 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
     return authMethod;
   }
 
-  // optimize ugi lookup for RPC operations to avoid a trip through
-  // UGI.getCurrentUser which is synch'ed
-  private static UserGroupInformation getRemoteUser() throws IOException {
-    UserGroupInformation ugi = Server.getRemoteUser();
-    return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
-  }
-
   /**
    * Get delegation token from OzoneManager.
    *
@@ -3858,12 +3846,6 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
     return LOG;
   }
 
-  // ReconfigurableBase get base configuration
-  @Override
-  public Configuration getConf() {
-    return getConfiguration();
-  }
-
   public OzoneConfiguration getConfiguration() {
     return configuration;
   }
@@ -4047,7 +4029,7 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
   /**
    * Check ozone admin privilege, throws exception if not admin.
    */
-  public void checkAdminUserPrivilege(String operation) throws IOException {
+  private void checkAdminUserPrivilege(String operation) throws IOException {
     final UserGroupInformation ugi = getRemoteUser();
     if (!isAdmin(ugi)) {
       throw new OMException("Only Ozone admins are allowed to " + operation,
@@ -4534,51 +4516,6 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
         fromSnapshot, toSnapshot, token, pageSize, forceFullDiff);
   }
 
-  @Override // ReconfigureProtocol
-  public String getServerName() {
-    return "OM";
-  }
-
-  @Override // ReconfigureProtocol
-  public void startReconfigure() throws IOException {
-    String operationName = "startOmReconfiguration";
-    checkAdminUserPrivilege(operationName);
-    startReconfigurationTask();
-  }
-
-  @Override // ReconfigureProtocol
-  public ReconfigurationTaskStatus getReconfigureStatus()
-      throws IOException {
-    String operationName = "getOmReconfigurationStatus";
-    checkAdminUserPrivilege(operationName);
-    return getReconfigurationTaskStatus();
-  }
-
-  @Override // ReconfigureProtocol
-  public List<String> listReconfigureProperties() throws IOException {
-    String operationName = "listOmReconfigurableProperties";
-    checkAdminUserPrivilege(operationName);
-    return Lists.newArrayList(getReconfigurableProperties());
-  }
-
-  @Override // ReconfigurableBase
-  public Collection<String> getReconfigurableProperties() {
-    return reconfigurableProperties;
-  }
-
-  @Override // ReconfigurableBase
-  public String reconfigurePropertyImpl(String property, String newVal)
-      throws ReconfigurationException {
-    if (property.equals(OZONE_ADMINISTRATORS)) {
-      return reconfOzoneAdmins(newVal);
-    } else if (property.equals(OZONE_READONLY_ADMINISTRATORS)) {
-      return reconfOzoneReadOnlyAdmins(newVal);
-    } else {
-      throw new ReconfigurationException(property, newVal,
-          getConfiguration().get(property));
-    }
-  }
-
   private String reconfOzoneAdmins(String newVal) {
     getConfiguration().set(OZONE_ADMINISTRATORS, newVal);
     Collection<String> admins =
@@ -4614,4 +4551,9 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
   public ReplicationConfigValidator getReplicationConfigValidator() {
     return replicationConfigValidator;
   }
+
+  @VisibleForTesting
+  public ReconfigurationHandler getReconfigurationHandler() {
+    return reconfigurationHandler;
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to