This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch HDDS-5713
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-5713 by this push:
     new 57889e7825 HDDS-8844. Internal move logic for DiskBalancer (#4887)
57889e7825 is described below

commit 57889e7825e3c198c350f5d19a0a59d7f92e23fe
Author: Symious <14933944+symi...@users.noreply.github.com>
AuthorDate: Thu Feb 1 19:53:58 2024 +0800

    HDDS-8844. Internal move logic for DiskBalancer (#4887)
---
 .../ozone/container/common/impl/ContainerSet.java  |  23 ++++
 .../container/common/interfaces/Container.java     |  11 ++
 .../ozone/container/common/interfaces/Handler.java |  14 +++
 .../container/common/utils/HddsVolumeUtil.java     |  26 ++++
 .../diskbalancer/DiskBalancerService.java          | 111 +++++++++++++++-
 .../policy/ContainerChoosingPolicy.java            |   5 +-
 .../policy/DefaultContainerChoosingPolicy.java     |  17 +--
 .../container/keyvalue/KeyValueContainer.java      | 140 +++++++++++++++++++++
 .../ozone/container/keyvalue/KeyValueHandler.java  |  33 +++++
 .../container/ozoneimpl/ContainerController.java   |  17 +++
 .../diskbalancer/TestDiskBalancerService.java      |  36 +++---
 .../src/main/proto/DatanodeClientProtocol.proto    |   1 +
 .../hdds/scm/node/TestNodeDecommissionManager.java |  11 +-
 13 files changed, 403 insertions(+), 42 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index b5dfd07d57..b9c3ff3bb5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -116,6 +116,29 @@ public class ContainerSet implements Iterable<Container<?>> {
     }
   }
 
+  /**
+   * Update an existing Container in the container map.
+   * @param container container to be updated
+   * @return the previous Container that was mapped to this containerID,
+   * now replaced by the given container
+   */
+  public Container updateContainer(Container<?> container) throws
+      StorageContainerException {
+    Preconditions.checkNotNull(container, "container cannot be null");
+
+    long containerId = container.getContainerData().getContainerID();
+    if (!containerMap.containsKey(containerId)) {
+      LOG.error("Container doesn't exists with container Id {}", containerId);
+      throw new StorageContainerException("Container doesn't exist with " +
+          "container Id " + containerId,
+          ContainerProtos.Result.CONTAINER_NOT_FOUND);
+    } else {
+      LOG.debug("Container with container Id {} is updated to containerMap",
+          containerId);
+      return containerMap.put(containerId, container);
+    }
+  }
+
   /**
    * Returns the Container with specified containerId.
    * @param containerId ID of the container to get
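
For context, a minimal sketch of how the new updateContainer API is meant to be driven; the lock-then-swap pattern mirrors the DiskBalancerTask change later in this patch, and the class and method names here are illustrative only:

    import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
    import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
    import org.apache.hadoop.ozone.container.common.interfaces.Container;

    final class ContainerSwapSketch {
      // Replace the registered entry under the old container's write lock,
      // so readers never observe a half-swapped container.
      static void swap(ContainerSet containerSet, Container<?> newContainer)
          throws StorageContainerException {
        long containerId = newContainer.getContainerData().getContainerID();
        Container<?> oldContainer = containerSet.getContainer(containerId);
        oldContainer.writeLock();
        try {
          // Throws CONTAINER_NOT_FOUND if containerId was never registered.
          containerSet.updateContainer(newContainer);
        } finally {
          oldContainer.writeUnlock();
        }
      }
    }
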
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
index 5fe148d6aa..94e69f74f2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
@@ -22,6 +22,7 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.nio.file.Path;
 import java.time.Instant;
 import java.util.Map;
 
@@ -201,6 +202,11 @@ public interface Container<CONTAINERDATA extends ContainerData> {
   void importContainerData(InputStream stream,
       ContainerPacker<CONTAINERDATA> packer) throws IOException;
 
+  /**
+   * Import the container from a container path.
+   */
+  void importContainerData(Path containerPath) throws IOException;
+
   /**
    * Export all the data of the container to one output archive with the help
    * of the packer.
@@ -260,6 +266,11 @@ public interface Container<CONTAINERDATA extends ContainerData> {
   ScanResult scanData(DataTransferThrottler throttler, Canceler canceler)
       throws InterruptedException;
 
+  /**
+   * Copy all the data of the container to the destination path.
+   */
+  void copyContainerData(Path destPath) throws IOException;
+
   /** Acquire read lock. */
   void readLock();
 
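Taken together, the two new interface methods split a container move into an offline copy plus an in-place import. A hedged sketch of the intended call order (the staging and final paths, and the freshDest instance, are assumptions based on how DiskBalancerTask uses the API later in this patch):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    import org.apache.hadoop.ozone.container.common.interfaces.Container;

    final class ContainerMoveSketch {
      // source is the live container; freshDest is a newly constructed
      // Container whose path fields already point at finalDir, as
      // DiskBalancerTask builds one before importing.
      static void move(Container<?> source, Container<?> freshDest,
          Path stagingDir, Path finalDir) throws IOException {
        source.copyContainerData(stagingDir);
        Files.move(stagingDir, finalDir, StandardCopyOption.ATOMIC_MOVE);
        freshDest.importContainerData(finalDir);
      }
    }
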
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index 2ffb9d30d1..f2a96e805f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.container.common.interfaces;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.nio.file.Path;
 
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -217,4 +218,17 @@ public abstract class Handler {
     this.clusterId = clusterID;
   }
 
+  /**
+   * Copy container to the destination path.
+   */
+  public abstract void copyContainer(
+      Container container, Path destination)
+      throws IOException;
+
+  /**
+   * Imports container from a container path.
+   */
+  public abstract Container importContainer(
+      ContainerData containerData, Path containerPath) throws IOException;
 }
+
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index 9b1da77b66..ed1d94e6db 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -33,6 +33,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;
 import java.util.concurrent.CompletableFuture;
 
 import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure;
@@ -140,4 +141,29 @@ public final class HddsVolumeUtil {
         hddsVolume.setDbVolume(globalDbVolumeMap.getOrDefault(
             hddsVolume.getStorageID(), null)));
   }
+
+
+  /**
+   * Get the HddsVolume according to the path.
+   * @param volumes volume list to match from
+   * @param pathStr path to match
+   */
+  public static HddsVolume matchHddsVolume(List<HddsVolume> volumes,
+      String pathStr) throws IOException {
+    assert pathStr != null;
+    List<HddsVolume> resList = new ArrayList<>();
+    for (HddsVolume hddsVolume: volumes) {
+      if (pathStr.startsWith(hddsVolume.getVolumeRootDir())) {
+        resList.add(hddsVolume);
+      }
+    }
+    if (resList.size() == 1) {
+      return resList.get(0);
+    } else if (resList.size() > 1) {
+      throw new IOException("Get multi volumes " +
+          resList.stream().map(HddsVolume::getVolumeRootDir).collect(
+              Collectors.joining(",")) + " matching path " + pathStr);
+    }
+    return null;
+  }
 }
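
A small usage sketch for the new helper (the wrapper method and error message are illustrative): matchHddsVolume returns null when no volume root prefixes the path, so callers must handle that case, and it throws when the path lies under more than one volume root.

    import java.io.IOException;
    import java.nio.file.Path;
    import java.util.List;

    import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

    final class MatchVolumeSketch {
      static HddsVolume resolve(List<HddsVolume> volumes, Path containerPath)
          throws IOException {
        // Throws if containerPath lies under more than one volume root.
        HddsVolume volume = HddsVolumeUtil.matchHddsVolume(volumes,
            containerPath.toString());
        if (volume == null) {
          throw new IOException("Path " + containerPath
              + " is not under any known volume root");
        }
        return volume;
      }
    }
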
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java
index 10980a6b1c..d7498d0ca6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.diskbalancer;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -31,13 +32,18 @@ import org.apache.hadoop.hdds.utils.BackgroundTask;
 import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
 import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.diskbalancer.policy.ContainerChoosingPolicy;
 import org.apache.hadoop.ozone.container.diskbalancer.policy.VolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
+import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.apache.hadoop.util.Time;
 import org.apache.ratis.util.FileUtils;
 import org.slf4j.Logger;
@@ -45,8 +51,10 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
@@ -327,7 +335,25 @@ public class DiskBalancerService extends BackgroundService {
       return queue;
     }
 
-    // TODO: Implementation for choose tasks
+    for (int i = 0; i < availableTaskCount; i++) {
+      Pair<HddsVolume, HddsVolume> pair = volumeChoosingPolicy
+          .chooseVolume(volumeSet, threshold, deltaSizes);
+      if (pair == null) {
+        continue;
+      }
+      HddsVolume sourceVolume = pair.getLeft(), destVolume = pair.getRight();
+      ContainerData toBalanceContainer = containerChoosingPolicy
+          .chooseContainer(ozoneContainer, sourceVolume, inProgressContainers);
+      if (toBalanceContainer != null) {
+        queue.add(new DiskBalancerTask(toBalanceContainer, sourceVolume,
+            destVolume));
+        inProgressContainers.add(toBalanceContainer.getContainerID());
+        deltaSizes.put(sourceVolume, deltaSizes.getOrDefault(sourceVolume, 0L)
+            - toBalanceContainer.getBytesUsed());
+        deltaSizes.put(destVolume, deltaSizes.getOrDefault(destVolume, 0L)
+            + toBalanceContainer.getBytesUsed());
+      }
+    }
 
     if (queue.isEmpty()) {
       metrics.incrIdleLoopNoAvailableVolumePairCount();
@@ -367,7 +393,88 @@ public class DiskBalancerService extends BackgroundService {
 
     @Override
     public BackgroundTaskResult call() {
-      // TODO: Details of handling tasks
+      long containerId = containerData.getContainerID();
+      boolean destVolumeIncreased = false;
+      Path diskBalancerTmpDir = null, diskBalancerDestDir = null;
+      long containerSize = containerData.getBytesUsed();
+      try {
+        diskBalancerTmpDir = destVolume.getTmpDir().toPath()
+            .resolve(DISK_BALANCER_DIR).resolve(String.valueOf(containerId));
+
+        // Copy container to new Volume's tmp Dir
+        ozoneContainer.getController().copyContainer(containerData,
+            diskBalancerTmpDir);
+
+        // Move container directory to final place on new volume
+        String idDir = VersionedDatanodeFeatures.ScmHA.chooseContainerPathID(
+            destVolume, destVolume.getClusterID());
+        diskBalancerDestDir =
+            Paths.get(KeyValueContainerLocationUtil.getBaseContainerLocation(
+                destVolume.getHddsRootDir().toString(), idDir,
+                containerData.getContainerID()));
+        Path destDirParent = diskBalancerDestDir.getParent();
+        if (destDirParent != null) {
+          Files.createDirectories(destDirParent);
+        }
+        Files.move(diskBalancerTmpDir, diskBalancerDestDir,
+            StandardCopyOption.ATOMIC_MOVE,
+            StandardCopyOption.REPLACE_EXISTING);
+
+        // Generate a new Container based on destDir
+        File containerFile = ContainerUtils.getContainerFile(
+            diskBalancerDestDir.toFile());
+        if (!containerFile.exists()) {
+          throw new IOException("ContainerFile for container " + containerId
+          + " doesn't exists.");
+        }
+        ContainerData originalContainerData = ContainerDataYaml
+            .readContainerFile(containerFile);
+        Container newContainer = ozoneContainer.getController()
+            .importContainer(originalContainerData, diskBalancerDestDir);
+        newContainer.getContainerData().getVolume()
+            .incrementUsedSpace(containerSize);
+        destVolumeIncreased = true;
+
+        // Update container for containerID
+        Container oldContainer = ozoneContainer.getContainerSet()
+            .getContainer(containerId);
+        oldContainer.writeLock();
+        try {
+          ozoneContainer.getContainerSet().updateContainer(newContainer);
+          oldContainer.delete();
+        } finally {
+          oldContainer.writeUnlock();
+        }
+        oldContainer.getContainerData().getVolume()
+            .decrementUsedSpace(containerSize);
+        metrics.incrSuccessCount(1);
+        metrics.incrSuccessBytes(containerSize);
+      } catch (IOException e) {
+        if (diskBalancerTmpDir != null) {
+          try {
+            Files.deleteIfExists(diskBalancerTmpDir);
+          } catch (IOException ex) {
+            LOG.warn("Failed to delete tmp directory {}", diskBalancerTmpDir,
+                ex);
+          }
+        }
+        if (diskBalancerDestDir != null) {
+          try {
+            Files.deleteIfExists(diskBalancerDestDir);
+          } catch (IOException ex) {
+            LOG.warn("Failed to delete dest directory {}: {}.",
+                diskBalancerDestDir, ex.getMessage());
+          }
+        }
+        // Only destVolume needs rolling back: sourceVolume's usedSpace is
+        // decremented last, so an exception means it was never changed.
+        if (destVolumeIncreased) {
+          destVolume.decrementUsedSpace(containerSize);
+        }
+        metrics.incrFailureCount();
+      } finally {
+        postCall();
+      }
       return BackgroundTaskResult.EmptyTaskResult.newResult();
     }
 
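Two points in the hunks above are worth spelling out. First, the scheduling loop debits the source volume and credits the destination in deltaSizes as each task is queued, so the next chooseVolume call already sees the in-flight moves. A toy illustration, with made-up disk names and a made-up container size:

    import java.util.HashMap;
    import java.util.Map;

    final class DeltaSizeSketch {
      public static void main(String[] args) {
        Map<String, Long> deltaSizes = new HashMap<>();
        long containerBytes = 5L * 1024 * 1024 * 1024;  // a 5 GB container
        // Queuing a move from disk1 to disk2: disk1 is about to shed
        // 5 GB and disk2 is about to gain it.
        deltaSizes.put("disk1",
            deltaSizes.getOrDefault("disk1", 0L) - containerBytes);
        deltaSizes.put("disk2",
            deltaSizes.getOrDefault("disk2", 0L) + containerBytes);
        // e.g. {disk1=-5368709120, disk2=5368709120}
        System.out.println(deltaSizes);
      }
    }

Second, on failure the task only rolls back destVolume's usedSpace: the source volume is decremented last in the happy path, so an exception implies it was never touched.
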
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/ContainerChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/ContainerChoosingPolicy.java
index 61b2eeba28..1c699d5305 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/ContainerChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/ContainerChoosingPolicy.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 
-import java.util.List;
 import java.util.Set;
 
 /**
@@ -34,6 +33,6 @@ public interface ContainerChoosingPolicy {
    *
    * @return a Container
    */
-  List<ContainerData> chooseContainer(OzoneContainer ozoneContainer,
-      HddsVolume volume, Set<Long> inProgressContainerIDs, Long targetSize);
+  ContainerData chooseContainer(OzoneContainer ozoneContainer,
+      HddsVolume volume, Set<Long> inProgressContainerIDs);
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultContainerChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultContainerChoosingPolicy.java
index da1e3610ad..f309ac43fa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultContainerChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/policy/DefaultContainerChoosingPolicy.java
@@ -25,9 +25,7 @@ import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.List;
 import java.util.Set;
 
 /**
@@ -38,22 +36,17 @@ public class DefaultContainerChoosingPolicy implements ContainerChoosingPolicy {
       DefaultContainerChoosingPolicy.class);
 
   @Override
-  public List<ContainerData> chooseContainer(OzoneContainer ozoneContainer,
-      HddsVolume hddsVolume, Set<Long> inProgressContainerIDs,
-      Long targetSize) {
-    List<ContainerData> results = new ArrayList<>();
-    long sizeTotal = 0L;
-
+  public ContainerData chooseContainer(OzoneContainer ozoneContainer,
+      HddsVolume hddsVolume, Set<Long> inProgressContainerIDs) {
     Iterator<Container<?>> itr = ozoneContainer.getController()
         .getContainers(hddsVolume);
-    while (itr.hasNext() && sizeTotal < targetSize) {
+    while (itr.hasNext()) {
       ContainerData containerData = itr.next().getContainerData();
       if (!inProgressContainerIDs.contains(
           containerData.getContainerID()) && containerData.isClosed()) {
-        results.add(containerData);
-        sizeTotal += containerData.getBytesUsed();
+        return containerData;
       }
     }
-    return results;
+    return null;
   }
 }
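
With the signature change, the policy hands back one closed container per call instead of a size-bounded batch, and signals exhaustion with null. A sketch of the consuming side (the wrapper class and the -1 sentinel are illustrative, not part of this patch):

    import java.util.Set;

    import org.apache.hadoop.ozone.container.common.impl.ContainerData;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
    import org.apache.hadoop.ozone.container.diskbalancer.policy.ContainerChoosingPolicy;
    import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;

    final class ChoosingSketch {
      // Returns the ID of the container queued for balancing, or -1 when the
      // volume has no eligible (closed, not already in-flight) container.
      static long pickOne(ContainerChoosingPolicy policy, OzoneContainer oc,
          HddsVolume source, Set<Long> inProgress) {
        ContainerData candidate = policy.chooseContainer(oc, source, inProgress);
        if (candidate == null) {
          return -1;
        }
        inProgress.add(candidate.getContainerID());
        return candidate.getContainerID();
      }
    }
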
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 8388182667..86fed1be73 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_ALREADY_EXISTS;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_DESCRIPTOR_MISSING;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_FILES_CREATE_ERROR;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_INTERNAL_ERROR;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_OPEN;
@@ -679,6 +680,52 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
     KeyValueContainerUtil.parseKVContainerData(containerData, config);
   }
 
+  @Override
+  public void importContainerData(Path containerPath) throws IOException {
+    writeLock();
+    try {
+      if (!getContainerFile().exists()) {
+        String errorMessage = String.format(
+            "Can't load container (cid=%d) data from a specific location"
+                + " as the container descriptor (%s) is missing",
+            getContainerData().getContainerID(),
+            getContainerFile().getAbsolutePath());
+        throw new StorageContainerException(errorMessage,
+            CONTAINER_DESCRIPTOR_MISSING);
+      }
+      KeyValueContainerData originalContainerData =
+          (KeyValueContainerData) ContainerDataYaml
+              .readContainerFile(getContainerFile());
+
+      importContainerData(originalContainerData);
+    } catch (Exception ex) {
+      if (ex instanceof StorageContainerException &&
+          ((StorageContainerException) ex).getResult() ==
+              CONTAINER_DESCRIPTOR_MISSING) {
+        throw ex;
+      }
+      //delete all the temporary data in case of any exception.
+      try {
+        if (containerData.getSchemaVersion() != null &&
+            containerData.getSchemaVersion().equals(OzoneConsts.SCHEMA_V3)) {
+          BlockUtils.removeContainerFromDB(containerData, config);
+        }
+        FileUtils.deleteDirectory(new File(containerData.getMetadataPath()));
+        FileUtils.deleteDirectory(new File(containerData.getChunksPath()));
+        FileUtils.deleteDirectory(
+            new File(getContainerData().getContainerPath()));
+      } catch (Exception deleteex) {
+        LOG.error(
+            "Can not cleanup destination directories after a container load"
+                + " error (cid" +
+                containerData.getContainerID() + ")", deleteex);
+      }
+      throw ex;
+    } finally {
+      writeUnlock();
+    }
+  }
+
   @Override
   public void exportContainerData(OutputStream destination,
       ContainerPacker<KeyValueContainerData> packer) throws IOException {
@@ -983,6 +1030,79 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
     return checker.fullCheck(throttler, canceler);
   }
 
+  @Override
+  public void copyContainerData(Path destination) throws IOException {
+    writeLock();
+    try {
+      // Only closed / quasi-closed containers are eligible to be copied
+      // here, mirroring the state check used when exporting replicas.
+      ContainerProtos.ContainerDataProto.State state =
+          getContainerData().getState();
+      if (!(state == ContainerProtos.ContainerDataProto.State.CLOSED ||
+          state == ContainerDataProto.State.QUASI_CLOSED)) {
+        throw new IllegalStateException(
+            "Only (quasi)closed containers can be exported, but " +
+                "ContainerId=" + getContainerData().getContainerID() +
+                " is in state " + state);
+      }
+
+      try {
+        if (!containerData.getSchemaVersion().equals(OzoneConsts.SCHEMA_V3)) {
+          compactDB();
+          // Close DB (and remove from cache) to avoid concurrent modification
+          // while copying it.
+          BlockUtils.removeDB(containerData, config);
+        }
+      } finally {
+        readLock();
+        writeUnlock();
+      }
+
+      if (containerData.getSchemaVersion().equals(OzoneConsts.SCHEMA_V3)) {
+        // Synchronize the dump and copy operations so that concurrent
+        // copies don't overwrite each other's dump files. Concurrent
+        // exports of a container are rare, so this should not affect
+        // performance much.
+        synchronized (dumpLock) {
+          BlockUtils.dumpKVContainerDataToFiles(containerData, config);
+          copyContainerToDestination(destination);
+        }
+      } else {
+        copyContainerToDestination(destination);
+      }
+    } catch (IOException e) {
+      LOG.error("Got exception when copying container {} to {}",
+          containerData.getContainerID(), destination, e);
+      throw e;
+    } finally {
+      if (lock.isWriteLockedByCurrentThread()) {
+        writeUnlock();
+      } else {
+        readUnlock();
+      }
+    }
+  }
+
+  /**
+   * Set all of the path-related container data fields based on the naming
+   * conventions.
+   *
+   */
+  public void populatePathFields(HddsVolume volume, Path containerPath) {
+    containerData.setMetadataPath(
+        KeyValueContainerLocationUtil.getContainerMetaDataPath(
+            containerPath.toString()).toString());
+    containerData.setChunksPath(
+        KeyValueContainerLocationUtil.getChunksLocationPath(
+            containerPath.toString()).toString()
+    );
+    containerData.setVolume(volume);
+    containerData.setDbFile(getContainerDBFile());
+  }
+
+  private enum ContainerCheckLevel {
+    NO_CHECK, FAST_CHECK, FULL_CHECK
+  }
+
   /**
    * Creates a temporary file.
    * @param file
@@ -1010,4 +1130,24 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
       packer.pack(this, destination);
     }
   }
+
+  /**
+   * Copy container directory to destination path.
+   * @param destination destination path
+   * @throws IOException file operation exception
+   */
+  private void copyContainerToDestination(Path destination)
+      throws IOException {
+    try {
+      if (Files.exists(destination)) {
+        FileUtils.deleteDirectory(destination.toFile());
+      }
+      FileUtils.copyDirectory(new File(containerData.getContainerPath()),
+          destination.toFile());
+
+    } catch (IOException e) {
+      LOG.error("Failed when copying container to {}", destination, e);
+      throw e;
+    }
+  }
 }
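
Note the locking in copyContainerData above: it enters under the write lock to validate state and close or dump the DB, then takes the read lock before releasing the write lock, so the long file copy only excludes writers. The outer finally releases whichever lock is still held. A stripped-down sketch of that downgrade pattern over a plain ReentrantReadWriteLock (names illustrative):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    final class LockDowngradeSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      void copyUnderDowngradedLock(Runnable prepare, Runnable copy) {
        lock.writeLock().lock();
        try {
          prepare.run();              // exclusive phase: validate, close DB
          lock.readLock().lock();     // downgrade: take the read lock ...
          lock.writeLock().unlock();  // ... before giving up the write lock
          copy.run();                 // shared phase: long-running file copy
        } finally {
          if (lock.isWriteLockedByCurrentThread()) {
            lock.writeLock().unlock();
          } else {
            lock.readLock().unlock();
          }
        }
      }
    }
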
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 362c08c6a9..f2ed3979de 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.ozone.container.common.utils.ContainerLogger;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
@@ -1167,6 +1168,38 @@ public class KeyValueHandler extends Handler {
     }
   }
 
+  @Override
+  public void copyContainer(final Container container, Path destinationPath)
+      throws IOException {
+    final KeyValueContainer kvc = (KeyValueContainer) container;
+    kvc.copyContainerData(destinationPath);
+  }
+
+  @Override
+  public Container importContainer(ContainerData originalContainerData,
+      final Path containerPath) throws IOException {
+    Preconditions.checkState(originalContainerData instanceof
+        KeyValueContainerData, "Should be KeyValueContainerData instance");
+
+    KeyValueContainerData containerData = new KeyValueContainerData(
+        (KeyValueContainerData) originalContainerData);
+
+    KeyValueContainer container = new KeyValueContainer(containerData,
+        conf);
+
+    HddsVolume volume = HddsVolumeUtil.matchHddsVolume(
+        StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()),
+        containerPath.toString());
+    if (volume == null ||
+        !containerPath.startsWith(volume.getVolumeRootDir())) {
+      throw new IOException("ContainerPath " + containerPath
+          + " doesn't match volume " + volume);
+    }
+    container.populatePathFields(volume, containerPath);
+    container.importContainerData(containerPath);
+    return container;
+  }
+
   @Override
   public void deleteContainer(Container container, boolean force)
       throws IOException {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
index feb5805387..07c627fc99 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
@@ -36,6 +36,7 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.nio.file.Path;
 import java.time.Instant;
 import java.util.Iterator;
 import java.util.Map;
@@ -167,6 +168,22 @@ public class ContainerController {
         .importContainer(containerData, rawContainerStream, packer);
   }
 
+  public void copyContainer(final ContainerData containerData,
+      final Path destinationPath) throws IOException {
+    handlers.get(containerData.getContainerType())
+        .copyContainer(
+            containerSet.getContainer(containerData.getContainerID()),
+            destinationPath);
+  }
+
+  public Container importContainer(
+      final ContainerData containerData,
+      final Path containerPath)
+      throws IOException {
+    return handlers.get(containerData.getContainerType())
+        .importContainer(containerData, containerPath);
+  }
+
   public void exportContainer(final ContainerType type,
       final long containerId, final OutputStream outputStream,
       final TarContainerPacker packer) throws IOException {
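
End to end, the controller pair added here is what DiskBalancerTask drives. A condensed sketch of the round trip (the directory arguments are illustrative, and the real task additionally fixes up usedSpace accounting and swaps the ContainerSet entry):

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
    import org.apache.hadoop.ozone.container.common.impl.ContainerData;
    import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
    import org.apache.hadoop.ozone.container.common.interfaces.Container;
    import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;

    final class ControllerRoundTripSketch {
      static Container moveViaController(ContainerController controller,
          ContainerData source, Path tmpDir, Path destDir) throws IOException {
        // 1. Copy the live container into a tmp dir on the destination volume.
        controller.copyContainer(source, tmpDir);
        // 2. Promote the tmp dir to the container's final location.
        Files.move(tmpDir, destDir, StandardCopyOption.ATOMIC_MOVE);
        // 3. Re-read the descriptor and register a new Container over destDir.
        File containerFile = ContainerUtils.getContainerFile(destDir.toFile());
        ContainerData copied = ContainerDataYaml.readContainerFile(containerFile);
        return controller.importContainer(copied, destDir);
      }
    }
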
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/diskbalancer/TestDiskBalancerService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/diskbalancer/TestDiskBalancerService.java
index 83aff1a971..664ac7a3ab 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/diskbalancer/TestDiskBalancerService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/diskbalancer/TestDiskBalancerService.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.container.common.TestBlockDeletingService;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -37,10 +36,9 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.ozone.test.GenericTestUtils;
+import org.junit.jupiter.api.Timeout;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Timeout;
 
 import java.io.File;
 import java.io.IOException;
@@ -57,28 +55,21 @@ import static org.mockito.Mockito.when;
 /**
  * This is a test class for DiskBalancerService.
  */
+@Timeout(30)
 public class TestDiskBalancerService {
   private File testRoot;
   private String scmId;
   private String datanodeUuid;
-  private OzoneConfiguration conf;
+  private OzoneConfiguration conf = new OzoneConfiguration();
 
-  private final ContainerLayoutVersion layout;
-  private final String schemaVersion;
+  private ContainerLayoutVersion layout;
+  private String schemaVersion;
   private MutableVolumeSet volumeSet;
 
-  public TestDiskBalancerService(ContainerTestVersionInfo versionInfo) {
-    this.layout = versionInfo.getLayout();
-    this.schemaVersion = versionInfo.getSchemaVersion();
-    conf = new OzoneConfiguration();
-    ContainerTestVersionInfo.setTestSchemaVersion(schemaVersion, conf);
-  }
-
-
   @BeforeEach
   public void init() throws IOException {
     testRoot = GenericTestUtils
-        .getTestDir(TestBlockDeletingService.class.getSimpleName());
+        .getTestDir(TestDiskBalancerService.class.getSimpleName());
     if (testRoot.exists()) {
       FileUtils.cleanDirectory(testRoot);
     }
@@ -100,9 +91,9 @@ public class TestDiskBalancerService {
     FileUtils.deleteDirectory(testRoot);
   }
 
-  @Timeout(30)
   @ContainerTestVersionInfo.ContainerTest
-  public void testUpdateService() throws Exception {
+  public void testUpdateService(ContainerTestVersionInfo versionInfo) throws Exception {
+    setLayoutAndSchemaForTest(versionInfo);
     // Increase volume's usedBytes
     for (StorageVolume volume : volumeSet.getVolumeMap().values()) {
       volume.incrementUsedSpace(volume.getCapacity() / 2);
@@ -142,8 +133,9 @@ public class TestDiskBalancerService {
     svc.shutdown();
   }
 
-  @Test
-  public void testPolicyClassInitialization() throws IOException {
+  @ContainerTestVersionInfo.ContainerTest
+  public void testPolicyClassInitialization(ContainerTestVersionInfo versionInfo) throws IOException {
+    setLayoutAndSchemaForTest(versionInfo);
     ContainerSet containerSet = new ContainerSet(1000);
     ContainerMetrics metrics = ContainerMetrics.create(conf);
     KeyValueHandler keyValueHandler =
@@ -190,4 +182,10 @@ public class TestDiskBalancerService {
     when(ozoneContainer.getController()).thenReturn(controller);
     return ozoneContainer;
   }
+
+  private void setLayoutAndSchemaForTest(ContainerTestVersionInfo versionInfo) {
+    this.layout = versionInfo.getLayout();
+    this.schemaVersion = versionInfo.getSchemaVersion();
+    ContainerTestVersionInfo.setTestSchemaVersion(schemaVersion, conf);
+  }
 }
diff --git a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
index 718e2a108c..cb07f151e9 100644
--- a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
@@ -152,6 +152,7 @@ enum Result {
   DELETE_ON_NON_EMPTY_CONTAINER = 44;
   EXPORT_CONTAINER_METADATA_FAILED = 45;
   IMPORT_CONTAINER_METADATA_FAILED = 46;
+  CONTAINER_DESCRIPTOR_MISSING = 47;
 }
 
 /**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
index 332d762a4c..e9bbe97799 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
 import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.node.NodeUtils.HostDefinition;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
@@ -71,21 +72,19 @@ public class TestNodeDecommissionManager {
   @Test
   public void testHostStringsParseCorrectly()
       throws InvalidHostStringException {
-    NodeDecommissionManager.HostDefinition def =
-        new NodeDecommissionManager.HostDefinition("foobar");
+    HostDefinition def = new HostDefinition("foobar");
     assertEquals("foobar", def.getHostname());
     assertEquals(-1, def.getPort());
 
-    def = new NodeDecommissionManager.HostDefinition(" foobar ");
+    def = new HostDefinition(" foobar ");
     assertEquals("foobar", def.getHostname());
     assertEquals(-1, def.getPort());
 
-    def = new NodeDecommissionManager.HostDefinition("foobar:1234");
+    def = new HostDefinition("foobar:1234");
     assertEquals("foobar", def.getHostname());
     assertEquals(1234, def.getPort());
 
-    def = new NodeDecommissionManager.HostDefinition(
-        "foobar.mycompany.com:1234");
+    def = new HostDefinition("foobar.mycompany.com:1234");
     assertEquals("foobar.mycompany.com", def.getHostname());
     assertEquals(1234, def.getPort());
 

