HDFS-10858. FBR processing may generate incorrect reportedBlock-blockGroup 
mapping. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72dfb048
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72dfb048
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72dfb048

Branch: refs/heads/HADOOP-12756
Commit: 72dfb048a9a7be64b371b74478b90150bf300d35
Parents: 59d5966
Author: Jing Zhao <ji...@apache.org>
Authored: Mon Sep 12 16:38:39 2016 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Sep 12 16:40:11 2016 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |   4 +-
 .../namenode/TestAddStripedBlockInFBR.java      | 109 +++++++++++++++++++
 2 files changed, 111 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72dfb048/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1362c0b..3a12d74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2624,7 +2624,7 @@ public class BlockManager implements BlockStatsMXBean {
       } while (storageBlock != null);
     }
 
-    // Iterate any remaing blocks that have not been reported and remove them
+    // Iterate any remaining blocks that have not been reported and remove them
     while (storageBlocksIterator.hasNext()) {
       toRemove.add(storageBlocksIterator.next());
     }
@@ -2677,7 +2677,7 @@ public class BlockManager implements BlockStatsMXBean {
                 corruptReplicas.isReplicaCorrupt(storedBlock, dn))) {
       // Add replica if appropriate. If the replica was previously corrupt
       // but now okay, it might need to be updated.
-      toAdd.add(new BlockInfoToAdd(storedBlock, replica));
+      toAdd.add(new BlockInfoToAdd(storedBlock, new Block(replica)));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72dfb048/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
new file mode 100644
index 0000000..37b334f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
+import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
+import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
+
+public class TestAddStripedBlockInFBR {
+  private final short GROUP_SIZE = (short) (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS);
+
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem dfs;
+
+  @Rule
+  public Timeout globalTimeout = new Timeout(300000);
+
+  @Before
+  public void setup() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
+    cluster.waitActive();
+    dfs = cluster.getFileSystem();
+  }
+
+  @After
+  public void tearDown() {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @Test
+  public void testAddBlockInFullBlockReport() throws Exception {
+    BlockManager spy = Mockito.spy(cluster.getNamesystem().getBlockManager());
+    // let NN ignore one DataNode's IBR
+    final DataNode dn = cluster.getDataNodes().get(0);
+    final DatanodeID datanodeID = dn.getDatanodeId();
+    Mockito.doNothing().when(spy)
+        .processIncrementalBlockReport(Mockito.eq(datanodeID), Mockito.any());
+    Whitebox.setInternalState(cluster.getNamesystem(), "blockManager", spy);
+
+    final Path ecDir = new Path("/ec");
+    final Path repDir = new Path("/rep");
+    dfs.mkdirs(ecDir);
+    dfs.mkdirs(repDir);
+    dfs.getClient().setErasureCodingPolicy(ecDir.toString(), null);
+
+    // create several non-EC files and one EC file
+    final Path[] repFiles = new Path[GROUP_SIZE];
+    for (int i = 0; i < GROUP_SIZE; i++) {
+      repFiles[i] = new Path(repDir, "f" + i);
+      DFSTestUtil.createFile(dfs, repFiles[i], 1L, (short) 3, 0L);
+    }
+    final Path ecFile = new Path(ecDir, "f");
+    DFSTestUtil.createFile(dfs, ecFile,
+        BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS, (short) 1, 0L);
+
+    // trigger dn's FBR. The FBR will add block-dn mapping.
+    DataNodeTestUtils.triggerBlockReport(dn);
+
+    // make sure NN has correct block-dn mapping
+    BlockInfoStriped blockInfo = (BlockInfoStriped) cluster.getNamesystem()
+        .getFSDirectory().getINode(ecFile.toString()).asFile().getLastBlock();
+    NumberReplicas nr = spy.countNodes(blockInfo);
+    Assert.assertEquals(GROUP_SIZE, nr.liveReplicas());
+    Assert.assertEquals(0, nr.excessReplicas());
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to