This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 2b16d53  HDDS-1898. GrpcReplicationService#download cannot replicate the container. (#1326)
2b16d53 is described below

commit 2b16d5377c39518ed0140fd27f5b000d464c2f43
Author: Nanda kumar <na...@apache.org>
AuthorDate: Thu Sep 5 18:12:36 2019 +0530

    HDDS-1898. GrpcReplicationService#download cannot replicate the container. (#1326)
---
 .../ozone/container/common/interfaces/Handler.java | 16 ++++++++++---
 .../commandhandler/DeleteBlocksCommandHandler.java |  7 +++++-
 .../container/keyvalue/KeyValueContainer.java      |  9 +++----
 .../ozone/container/keyvalue/KeyValueHandler.java  | 28 +++++++++++++++++-----
 .../background/BlockDeletingService.java           |  6 +++++
 .../container/ozoneimpl/ContainerController.java   | 12 ++++++++--
 .../OnDemandContainerReplicationSource.java        | 18 ++++----------
 .../ozone/container/TestContainerReplication.java  |  8 +++++++
 8 files changed, 75 insertions(+), 29 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index 97601ec..8c3b981 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -19,8 +19,9 @@
 package org.apache.hadoop.ozone.container.common.interfaces;
 
 
-import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -109,14 +110,23 @@ public abstract class Handler {
       DispatcherContext dispatcherContext);
 
   /**
-   * Import container data from a raw input stream.
+   * Imports container from a raw input stream.
    */
   public abstract Container importContainer(
       long containerID,
       long maxSize,
       String originPipelineId,
       String originNodeId,
-      FileInputStream rawContainerStream,
+      InputStream rawContainerStream,
+      TarContainerPacker packer)
+      throws IOException;
+
+  /**
+   * Exports container to the output stream.
+   */
+  public abstract void exportContainer(
+      Container container,
+      OutputStream outputStream,
       TarContainerPacker packer)
       throws IOException;
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index a5d4760..a4849f2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -127,7 +127,12 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
           case KeyValueContainer:
             KeyValueContainerData containerData = (KeyValueContainerData)
                 cont.getContainerData();
-            deleteKeyValueContainerBlocks(containerData, entry);
+            cont.writeLock();
+            try {
+              deleteKeyValueContainerBlocks(containerData, entry);
+            } finally {
+              cont.writeUnlock();
+            }
             txResultBuilder.setContainerID(containerId)
                 .setSuccess(true);
             break;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 53065cc..b7f46c9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -330,6 +330,9 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
     } finally {
       writeUnlock();
     }
+    LOG.info("Container {} is closed with bcsId {}.",
+        containerData.getContainerID(),
+        containerData.getBlockCommitSequenceId());
   }
 
   /**
@@ -361,13 +364,10 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
     }
   }
 
-  void compactDB() throws StorageContainerException {
+  private void compactDB() throws StorageContainerException {
     try {
       try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
         db.getStore().compactDB();
-        LOG.info("Container {} is closed with bcsId {}.",
-            containerData.getContainerID(),
-            containerData.getBlockCommitSequenceId());
       }
     } catch (StorageContainerException ex) {
       throw ex;
@@ -524,6 +524,7 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
           "Only closed containers could be exported: ContainerId="
               + getContainerData().getContainerID());
     }
+    compactDB();
     packer.pack(this, destination);
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 50e3706..ab1d124 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -18,8 +18,9 @@
 
 package org.apache.hadoop.ozone.container.keyvalue;
 
-import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -841,13 +842,14 @@ public class KeyValueHandler extends Handler {
     throw new StorageContainerException(msg, result);
   }
 
-  public Container importContainer(long containerID, long maxSize,
-      String originPipelineId,
-      String originNodeId,
-      FileInputStream rawContainerStream,
-      TarContainerPacker packer)
+  @Override
+  public Container importContainer(final long containerID,
+      final long maxSize, final String originPipelineId,
+      final String originNodeId, final InputStream rawContainerStream,
+      final TarContainerPacker packer)
       throws IOException {
 
+    // TODO: Add layout version!
     KeyValueContainerData containerData =
         new KeyValueContainerData(containerID,
             maxSize, originPipelineId, originNodeId);
@@ -863,6 +865,20 @@ public class KeyValueHandler extends Handler {
   }
 
   @Override
+  public void exportContainer(final Container container,
+      final OutputStream outputStream,
+      final TarContainerPacker packer)
+      throws IOException{
+    container.readLock();
+    try {
+      final KeyValueContainer kvc = (KeyValueContainer) container;
+      kvc.exportContainerData(outputStream, packer);
+    } finally {
+      container.readUnlock();
+    }
+  }
+
+  @Override
   public void markContainerForClose(Container container)
       throws IOException {
     // Move the container to CLOSING state only if it's OPEN
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 25c00c3..fd048d7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
@@ -247,6 +248,9 @@ public class BlockDeletingService extends BackgroundService {
     @Override
     public BackgroundTaskResult call() throws Exception {
       ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult();
+      final Container container = ozoneContainer.getContainerSet()
+          .getContainer(containerData.getContainerID());
+      container.writeLock();
       long startTime = Time.monotonicNow();
       // Scan container's db and get list of under deletion blocks
       try (ReferenceCountedDB meta = BlockUtils.getDB(containerData, conf)) {
@@ -313,6 +317,8 @@ public class BlockDeletingService extends BackgroundService {
         }
         crr.addAll(succeedBlocks);
         return crr;
+      } finally {
+        container.writeUnlock();
       }
     }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
index 523e63f..eb672a7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
@@ -29,8 +29,9 @@ import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
 
-import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.util.Iterator;
 import java.util.Map;
 
@@ -120,13 +121,20 @@ public class ContainerController {
 
   public Container importContainer(final ContainerType type,
      final long containerId, final long maxSize, final String originPipelineId,
-      final String originNodeId, final FileInputStream rawContainerStream,
+      final String originNodeId, final InputStream rawContainerStream,
       final TarContainerPacker packer)
       throws IOException {
     return handlers.get(type).importContainer(containerId, maxSize,
         originPipelineId, originNodeId, rawContainerStream, packer);
   }
 
+  public void exportContainer(final ContainerType type,
+      final long containerId, final OutputStream outputStream,
+      final TarContainerPacker packer) throws IOException {
+    handlers.get(type).exportContainer(
+        containerSet.getContainer(containerId), outputStream, packer);
+  }
+
   /**
    * Deletes a container given its Id.
    * @param containerId Id of the container to be deleted
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java
index 28b8713..d318ffa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.io.OutputStream;
 
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker;
 import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
 
 import com.google.common.base.Preconditions;
@@ -41,7 +40,7 @@ public class OnDemandContainerReplicationSource
 
   private ContainerController controller;
 
-  private ContainerPacker packer = new TarContainerPacker();
+  private TarContainerPacker packer = new TarContainerPacker();
 
   public OnDemandContainerReplicationSource(
       ContainerController controller) {
@@ -59,18 +58,11 @@ public class OnDemandContainerReplicationSource
 
     Container container = controller.getContainer(containerId);
 
-    Preconditions
-        .checkNotNull(container, "Container is not found " + containerId);
+    Preconditions.checkNotNull(
+        container, "Container is not found " + containerId);
 
-    switch (container.getContainerType()) {
-    case KeyValueContainer:
-      packer.pack(container,
-          destination);
-      break;
-    default:
-      LOG.warn("Container type " + container.getContainerType()
-          + " is not replicable as no compression algorithm for that.");
-    }
+    controller.exportContainer(
+        container.getContainerType(), containerId, destination, packer);
 
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
index ab78705..7e8ff3c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer;
+import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
 import org.apache.hadoop.test.GenericTestUtils;
 
@@ -119,6 +120,13 @@ public class TestContainerReplication {
         chooseDatanodeWithoutContainer(sourcePipelines,
             cluster.getHddsDatanodes());
 
+    // Close the container
+    cluster.getStorageContainerManager().getScmNodeManager()
+        .addDatanodeCommand(
+            sourceDatanodes.get(0).getUuid(),
+            new CloseContainerCommand(containerId,
+                sourcePipelines.getId(), true));
+
     //WHEN: send the order to replicate the container
     cluster.getStorageContainerManager().getScmNodeManager()
         .addDatanodeCommand(destinationDatanode.getDatanodeDetails().getUuid(),


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
