This is an automated email from the ASF dual-hosted git repository.

msingh pushed a commit to branch HDDS-1550
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit a8a1727fbdf1c94fdbf4c57b098b0ce013c88fee
Author: Mukul Kumar Singh <msi...@apache.org>
AuthorDate: Fri May 17 09:17:27 2019 +0530

    HDDS-1550. MiniOzoneChaosCluster is not shutting down all the threads during shutdown. Contributed by Mukul Kumar Singh.
---
 .../ozone/container/common/interfaces/Handler.java    |  5 +++++
 .../ozone/container/common/report/ReportManager.java  | 10 ++++++++++
 .../common/transport/server/XceiverServerGrpc.java    |  6 ++++++
 .../ozone/container/keyvalue/KeyValueHandler.java     |  5 +++++
 .../ozone/container/ozoneimpl/OzoneContainer.java     |  1 +
 .../apache/hadoop/hdds/scm/node/SCMNodeManager.java   |  1 +
 .../hdds/scm/server/StorageContainerManager.java      |  8 +++++++-
 .../apache/hadoop/ozone/MiniOzoneChaosCluster.java    |  2 +-
 .../org/apache/hadoop/ozone/MiniOzoneClusterImpl.java | 19 +++++++++++--------
 9 files changed, 47 insertions(+), 10 deletions(-)
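
Several of the changes below apply the same teardown pattern: call shutdown() on a component's executor and then block, with a generous bound, until its worker threads have actually exited, so no stray non-daemon threads outlive the cluster. A minimal standalone sketch of that pattern follows; the class and field names are illustrative, not taken from the patch, and SLF4J is assumed only because the patch itself logs through it.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public final class BoundedShutdownExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(BoundedShutdownExample.class);

      private final ScheduledExecutorService executorService =
          Executors.newSingleThreadScheduledExecutor();

      public void shutdown() {
        executorService.shutdown();  // stop accepting new tasks
        try {
          // Block until in-flight tasks finish and the worker thread exits;
          // the patch uses an effectively unbounded wait of one day.
          executorService.awaitTermination(1, TimeUnit.DAYS);
        } catch (InterruptedException e) {
          LOG.error("Interrupted while waiting for executor shutdown", e);
          Thread.currentThread().interrupt();  // preserve the interrupt flag
        }
      }

      public static void main(String[] args) {
        new BoundedShutdownExample().shutdown();
      }
    }

Catching InterruptedException and restoring the interrupt flag is the usual idiom here; the patch itself catches Exception more broadly and logs the failure.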

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index a3bb34b..621dffb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -121,6 +121,11 @@ public abstract class Handler {
       throws IOException;
 
   /**
+   * Stop the Handler.
+   */
+  public abstract void stop();
+
+  /**
    * Marks the container for closing. Moves the container to CLOSING state.
    *
    * @param container container to update
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
index 8097cd6..ef592a6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
@@ -23,10 +23,13 @@ import com.google.protobuf.GeneratedMessage;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 
 /**
  * ReportManager is responsible for managing all the {@link ReportPublisher}
@@ -34,6 +37,8 @@ import java.util.concurrent.ScheduledExecutorService;
  * which should be used for scheduling the reports.
  */
 public final class ReportManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReportManager.class);
 
   private final StateContext context;
   private final List<ReportPublisher> publishers;
@@ -71,6 +76,11 @@ public final class ReportManager {
    */
   public void shutdown() {
     executorService.shutdown();
+    try {
+      executorService.awaitTermination(1, TimeUnit.DAYS);
+    } catch (Exception e) {
+      LOG.error("failed to shutdown Report Manager", e);
+    }
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index 6fe8fd4..3987cee 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -57,6 +57,7 @@ import java.net.SocketAddress;
 import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Creates a Grpc server endpoint that acts as the communication layer for
@@ -172,6 +173,11 @@ public final class XceiverServerGrpc extends XceiverServer {
   public void stop() {
     if (isStarted) {
       server.shutdown();
+      try {
+        server.awaitTermination(1, TimeUnit.DAYS);
+      } catch (Exception e) {
+        LOG.error("failed to shutdown XceiverServerGrpc", e);
+      }
       isStarted = false;
     }
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 531fb02..0355973e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -159,6 +159,11 @@ public class KeyValueHandler extends Handler {
   }
 
   @Override
+  public void stop() {
+    blockDeletingService.shutdown();
+  }
+
+  @Override
   public ContainerCommandResponseProto handle(
       ContainerCommandRequestProto request, Container container,
       DispatcherContext dispatcherContext) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index f34334d..e04dd01 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -200,6 +200,7 @@ public class OzoneContainer {
     stopContainerScrub();
     writeChannel.stop();
     readChannel.stop();
+    this.handlers.values().forEach(Handler::stop);
     hddsDispatcher.shutdown();
     volumeSet.shutdown();
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 2ab7295..da45ced 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -184,6 +184,7 @@ public class SCMNodeManager implements NodeManager {
   public void close() throws IOException {
     unregisterMXBean();
     metrics.unRegister();
+    nodeStateManager.close();
   }
 
   /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 8c4a514..a5e67e8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -83,6 +83,7 @@ import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -205,6 +206,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   private final OzoneConfiguration configuration;
   private final SafeModeHandler safeModeHandler;
   private SCMContainerMetrics scmContainerMetrics;
+  private MetricsSystem ms;
 
   /**
    * Creates a new StorageContainerManager. Configuration will be
@@ -898,7 +900,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
         buildRpcServerStartMessage(
             "StorageContainerLocationProtocol RPC server",
             getClientRpcAddress()));
-    DefaultMetricsSystem.initialize("StorageContainerManager");
+    ms = DefaultMetricsSystem.initialize("StorageContainerManager");
 
     commandWatcherLeaseManager.start();
     getClientProtocolServer().start();
@@ -993,6 +995,10 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
       metrics.unRegister();
     }
 
+    if (ms != null) {
+      ms.stop();
+    }
+
     unregisterMXBean();
     if (scmContainerMetrics != null) {
       scmContainerMetrics.unRegister();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index 059af5a..855a97c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -203,7 +203,7 @@ public class MiniOzoneChaosCluster extends MiniOzoneClusterImpl {
           1, TimeUnit.SECONDS);
       conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1,
           TimeUnit.SECONDS);
-      conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 8);
+      conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
     }
 
     @Override
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 9fbdad7..992a665 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -340,19 +340,20 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
       ozoneManager.join();
     }
 
+    if (!hddsDatanodes.isEmpty()) {
+      LOG.info("Shutting the HddsDatanodes");
+      hddsDatanodes.parallelStream()
+          .forEach(dn -> {
+            dn.stop();
+            dn.join();
+          });
+    }
+
     if (scm != null) {
       LOG.info("Stopping the StorageContainerManager");
       scm.stop();
       scm.join();
     }
-
-    if (!hddsDatanodes.isEmpty()) {
-      LOG.info("Shutting the HddsDatanodes");
-      for (HddsDatanodeService hddsDatanode : hddsDatanodes) {
-        hddsDatanode.stop();
-        hddsDatanode.join();
-      }
-    }
   }
 
   /**
@@ -568,6 +569,8 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
       conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
       conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
       conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers);
+      conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
+          "3s");
       configureSCMheartbeat();
     }
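
For context on the MiniOzoneClusterImpl hunk above: the datanodes are now stopped in parallel, and before the StorageContainerManager, with each stop() followed by a join() so shutdown only returns once the datanode threads are gone. A self-contained sketch of that stop-then-join idiom, using a stand-in Node type rather than the real HddsDatanodeService API:

    import java.util.List;

    public final class ParallelTeardownExample {

      interface Node {
        void stop();   // ask the node's services to shut down
        void join();   // block until the node's threads have exited
      }

      static void shutdownNodes(List<Node> nodes) {
        // Each node is stopped and joined on its own stream worker, so one
        // slow node does not serialize the whole cluster teardown.
        nodes.parallelStream().forEach(node -> {
          node.stop();
          node.join();
        });
      }

      public static void main(String[] args) {
        shutdownNodes(List.of());  // no-op demo with an empty node list
      }
    }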
 

