hadoop git commit: HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on failed storages. Contributed by Kuhu Shukla.

2016-08-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 575802683 -> 5b3deac2e


HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on 
failed storages. Contributed by Kuhu Shukla.

(cherry picked from commit f715f141856cb6a4c6574893f40f9865653b631e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b3deac2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b3deac2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b3deac2

Branch: refs/heads/branch-2.8
Commit: 5b3deac2e3fb2498d1fdb39994e83f917f14de2a
Parents: 5758026
Author: Kihwal Lee 
Authored: Fri Aug 5 11:43:43 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Aug 5 11:43:43 2016 -0500

--
 .../server/blockmanagement/BlockManager.java| 22 +++--
 .../apache/hadoop/hdfs/TestFileCorruption.java  | 87 +++-
 2 files changed, 103 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b3deac2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8b60a0f..c617208 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -954,8 +954,8 @@ public class BlockManager implements BlockStatsMXBean {
 }
 
 final int numNodes = blocksMap.numNodes(blk);
-final boolean isCorrupt = numCorruptNodes == numNodes;
-final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptNodes;
+final boolean isCorrupt = numCorruptReplicas == numNodes;
+final int numMachines = isCorrupt ? numNodes: numNodes - 
numCorruptReplicas;
 DatanodeStorageInfo[] machines = new DatanodeStorageInfo[numMachines];
 int j = 0;
 if (numMachines > 0) {
@@ -1271,11 +1271,23 @@ public class BlockManager implements BlockStatsMXBean {
   + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
   + ") does not exist");
 }
-
+
+DatanodeStorageInfo storage = null;
+if (storageID != null) {
+  storage = node.getStorageInfo(storageID);
+}
+if (storage == null) {
+  storage = storedBlock.findStorageInfo(node);
+}
+
+if (storage == null) {
+  blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found on {}",
+  blk, dn);
+  return;
+}
 markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock,
 blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
-storageID == null ? null : node.getStorageInfo(storageID),
-node);
+storage, node);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b3deac2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 1ed8603..2437e38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -18,15 +18,22 @@
 
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Random;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -36,6 +43,8 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

[27/50] hadoop git commit: HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on failed storages. Contributed by Kuhu Shukla

2016-05-03 Thread aw
HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on 
failed storages. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6243eabb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6243eabb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6243eabb

Branch: refs/heads/HADOOP-12930
Commit: 6243eabb48390fffada2418ade5adf9e0766afbe
Parents: cf2ee45
Author: Kihwal Lee 
Authored: Thu Apr 28 12:42:28 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 28 12:44:53 2016 -0500

--
 .../server/blockmanagement/BlockManager.java| 23 --
 .../apache/hadoop/hdfs/TestFileCorruption.java  | 87 +++-
 2 files changed, 103 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6243eabb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 70086e6..accfc38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1038,9 +1038,9 @@ public class BlockManager implements BlockStatsMXBean {
 }
 
 final int numNodes = blocksMap.numNodes(blk);
-final boolean isCorrupt = numCorruptNodes != 0 &&
-numCorruptNodes == numNodes;
-final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptNodes;
+final boolean isCorrupt = numCorruptReplicas != 0 &&
+numCorruptReplicas == numNodes;
+final int numMachines = isCorrupt ? numNodes: numNodes - 
numCorruptReplicas;
 final DatanodeStorageInfo[] machines = new 
DatanodeStorageInfo[numMachines];
 final byte[] blockIndices = blk.isStriped() ? new byte[numMachines] : null;
 int j = 0, i = 0;
@@ -1366,11 +1366,22 @@ public class BlockManager implements BlockStatsMXBean {
   + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
   + ") does not exist");
 }
-
+DatanodeStorageInfo storage = null;
+if (storageID != null) {
+  storage = node.getStorageInfo(storageID);
+}
+if (storage == null) {
+  storage = storedBlock.findStorageInfo(node);
+}
+
+if (storage == null) {
+  blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found on {}",
+  blk, dn);
+  return;
+}
 markBlockAsCorrupt(new BlockToMarkCorrupt(reportedBlock, storedBlock,
 blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
-storageID == null ? null : node.getStorageInfo(storageID),
-node);
+storage, node);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6243eabb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index c1a7ebb..011baa1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -18,15 +18,22 @@
 
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Random;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -36,6 +43,8 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

[40/50] [abbrv] hadoop git commit: HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on failed storages. Contributed by Kuhu Shukla

2016-04-29 Thread arp
HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on 
failed storages. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6243eabb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6243eabb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6243eabb

Branch: refs/heads/HDFS-7240
Commit: 6243eabb48390fffada2418ade5adf9e0766afbe
Parents: cf2ee45
Author: Kihwal Lee 
Authored: Thu Apr 28 12:42:28 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 28 12:44:53 2016 -0500

--
 .../server/blockmanagement/BlockManager.java| 23 --
 .../apache/hadoop/hdfs/TestFileCorruption.java  | 87 +++-
 2 files changed, 103 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6243eabb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 70086e6..accfc38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1038,9 +1038,9 @@ public class BlockManager implements BlockStatsMXBean {
 }
 
 final int numNodes = blocksMap.numNodes(blk);
-final boolean isCorrupt = numCorruptNodes != 0 &&
-numCorruptNodes == numNodes;
-final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptNodes;
+final boolean isCorrupt = numCorruptReplicas != 0 &&
+numCorruptReplicas == numNodes;
+final int numMachines = isCorrupt ? numNodes: numNodes - 
numCorruptReplicas;
 final DatanodeStorageInfo[] machines = new 
DatanodeStorageInfo[numMachines];
 final byte[] blockIndices = blk.isStriped() ? new byte[numMachines] : null;
 int j = 0, i = 0;
@@ -1366,11 +1366,22 @@ public class BlockManager implements BlockStatsMXBean {
   + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
   + ") does not exist");
 }
-
+DatanodeStorageInfo storage = null;
+if (storageID != null) {
+  storage = node.getStorageInfo(storageID);
+}
+if (storage == null) {
+  storage = storedBlock.findStorageInfo(node);
+}
+
+if (storage == null) {
+  blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found on {}",
+  blk, dn);
+  return;
+}
 markBlockAsCorrupt(new BlockToMarkCorrupt(reportedBlock, storedBlock,
 blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
-storageID == null ? null : node.getStorageInfo(storageID),
-node);
+storage, node);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6243eabb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index c1a7ebb..011baa1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -18,15 +18,22 @@
 
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Random;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -36,6 +43,8 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 

hadoop git commit: HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on failed storages. Contributed by Kuhu Shukla.

2016-04-28 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 92548e09c -> a3ece8b5b


HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on 
failed storages. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3ece8b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3ece8b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3ece8b5

Branch: refs/heads/branch-2.7
Commit: a3ece8b5b4eec25a732773e1c1ded9bb7b449f33
Parents: 92548e0
Author: Kihwal Lee 
Authored: Thu Apr 28 16:47:04 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 28 16:47:04 2016 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/blockmanagement/BlockManager.java| 22 +++--
 .../apache/hadoop/hdfs/TestFileCorruption.java  | 90 +++-
 3 files changed, 109 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3ece8b5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 92362b6..4dd0149 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -164,6 +164,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-10245. Fix the findbugs warnings in branch-2.7.
 (Brahma Reddy Battula via aajisaka)
 
+HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks
+on failed storages. (Kuhu Shukla via kihwal)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3ece8b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 94ac335..40d9e93 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -856,8 +856,8 @@ public class BlockManager {
 }
 
 final int numNodes = blocksMap.numNodes(blk);
-final boolean isCorrupt = numCorruptNodes == numNodes;
-final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptNodes;
+final boolean isCorrupt = numCorruptReplicas == numNodes;
+final int numMachines = isCorrupt ? numNodes: numNodes - 
numCorruptReplicas;
 final DatanodeStorageInfo[] machines = new 
DatanodeStorageInfo[numMachines];
 int j = 0;
 if (numMachines > 0) {
@@ -1155,11 +1155,23 @@ public class BlockManager {
   + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
   + ") does not exist");
 }
-
+
+DatanodeStorageInfo storage = null;
+if (storageID != null) {
+  storage = node.getStorageInfo(storageID);
+}
+if (storage == null) {
+  storage = storedBlock.findStorageInfo(node);
+}
+
+if (storage == null) {
+  blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found on {}",
+  blk, dn);
+  return;
+}
 markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock,
 blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
-storageID == null ? null : node.getStorageInfo(storageID),
-node);
+storage, node);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3ece8b5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 8001bfb..d849c45 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -18,16 +18,23 @@
 
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
import java.io.DataInputStream;

hadoop git commit: HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on failed storages. Contributed by Kuhu Shukla.

2016-04-28 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7271e91b7 -> f715f1418


HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on 
failed storages. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f715f141
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f715f141
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f715f141

Branch: refs/heads/branch-2
Commit: f715f141856cb6a4c6574893f40f9865653b631e
Parents: 7271e91
Author: Kihwal Lee 
Authored: Thu Apr 28 16:39:48 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 28 16:39:48 2016 -0500

--
 .../server/blockmanagement/BlockManager.java| 22 +++--
 .../apache/hadoop/hdfs/TestFileCorruption.java  | 87 +++-
 2 files changed, 103 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f715f141/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index d811d1d..58de45e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -953,8 +953,8 @@ public class BlockManager implements BlockStatsMXBean {
 }
 
 final int numNodes = blocksMap.numNodes(blk);
-final boolean isCorrupt = numCorruptNodes == numNodes;
-final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptNodes;
+final boolean isCorrupt = numCorruptReplicas == numNodes;
+final int numMachines = isCorrupt ? numNodes: numNodes - 
numCorruptReplicas;
 final DatanodeStorageInfo[] machines = new 
DatanodeStorageInfo[numMachines];
 int j = 0;
 if (numMachines > 0) {
@@ -1232,11 +1232,23 @@ public class BlockManager implements BlockStatsMXBean {
   + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
   + ") does not exist");
 }
-
+
+DatanodeStorageInfo storage = null;
+if (storageID != null) {
+  storage = node.getStorageInfo(storageID);
+}
+if (storage == null) {
+  storage = storedBlock.findStorageInfo(node);
+}
+
+if (storage == null) {
+  blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found on {}",
+  blk, dn);
+  return;
+}
 markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock,
 blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
-storageID == null ? null : node.getStorageInfo(storageID),
-node);
+storage, node);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f715f141/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index c1a7ebb..011baa1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -18,15 +18,22 @@
 
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Random;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -36,6 +43,8 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 

[35/50] [abbrv] hadoop git commit: HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on failed storages. Contributed by Kuhu Shukla

2016-04-28 Thread wangda
HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on 
failed storages. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6243eabb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6243eabb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6243eabb

Branch: refs/heads/YARN-3368
Commit: 6243eabb48390fffada2418ade5adf9e0766afbe
Parents: cf2ee45
Author: Kihwal Lee 
Authored: Thu Apr 28 12:42:28 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 28 12:44:53 2016 -0500

--
 .../server/blockmanagement/BlockManager.java| 23 --
 .../apache/hadoop/hdfs/TestFileCorruption.java  | 87 +++-
 2 files changed, 103 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6243eabb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 70086e6..accfc38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1038,9 +1038,9 @@ public class BlockManager implements BlockStatsMXBean {
 }
 
 final int numNodes = blocksMap.numNodes(blk);
-final boolean isCorrupt = numCorruptNodes != 0 &&
-numCorruptNodes == numNodes;
-final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptNodes;
+final boolean isCorrupt = numCorruptReplicas != 0 &&
+numCorruptReplicas == numNodes;
+final int numMachines = isCorrupt ? numNodes: numNodes - 
numCorruptReplicas;
 final DatanodeStorageInfo[] machines = new 
DatanodeStorageInfo[numMachines];
 final byte[] blockIndices = blk.isStriped() ? new byte[numMachines] : null;
 int j = 0, i = 0;
@@ -1366,11 +1366,22 @@ public class BlockManager implements BlockStatsMXBean {
   + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
   + ") does not exist");
 }
-
+DatanodeStorageInfo storage = null;
+if (storageID != null) {
+  storage = node.getStorageInfo(storageID);
+}
+if (storage == null) {
+  storage = storedBlock.findStorageInfo(node);
+}
+
+if (storage == null) {
+  blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found on {}",
+  blk, dn);
+  return;
+}
 markBlockAsCorrupt(new BlockToMarkCorrupt(reportedBlock, storedBlock,
 blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
-storageID == null ? null : node.getStorageInfo(storageID),
-node);
+storage, node);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6243eabb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index c1a7ebb..011baa1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -18,15 +18,22 @@
 
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Random;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -36,6 +43,8 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 

hadoop git commit: HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on failed storages. Contributed by Kuhu Shukla

2016-04-28 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk cf2ee45f7 -> 6243eabb4


HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on 
failed storages. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6243eabb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6243eabb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6243eabb

Branch: refs/heads/trunk
Commit: 6243eabb48390fffada2418ade5adf9e0766afbe
Parents: cf2ee45
Author: Kihwal Lee 
Authored: Thu Apr 28 12:42:28 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 28 12:44:53 2016 -0500

--
 .../server/blockmanagement/BlockManager.java| 23 --
 .../apache/hadoop/hdfs/TestFileCorruption.java  | 87 +++-
 2 files changed, 103 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6243eabb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 70086e6..accfc38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1038,9 +1038,9 @@ public class BlockManager implements BlockStatsMXBean {
 }
 
 final int numNodes = blocksMap.numNodes(blk);
-final boolean isCorrupt = numCorruptNodes != 0 &&
-numCorruptNodes == numNodes;
-final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptNodes;
+final boolean isCorrupt = numCorruptReplicas != 0 &&
+numCorruptReplicas == numNodes;
+final int numMachines = isCorrupt ? numNodes: numNodes - 
numCorruptReplicas;
 final DatanodeStorageInfo[] machines = new 
DatanodeStorageInfo[numMachines];
 final byte[] blockIndices = blk.isStriped() ? new byte[numMachines] : null;
 int j = 0, i = 0;
@@ -1366,11 +1366,22 @@ public class BlockManager implements BlockStatsMXBean {
   + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
   + ") does not exist");
 }
-
+DatanodeStorageInfo storage = null;
+if (storageID != null) {
+  storage = node.getStorageInfo(storageID);
+}
+if (storage == null) {
+  storage = storedBlock.findStorageInfo(node);
+}
+
+if (storage == null) {
+  blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found on {}",
+  blk, dn);
+  return;
+}
 markBlockAsCorrupt(new BlockToMarkCorrupt(reportedBlock, storedBlock,
 blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
-storageID == null ? null : node.getStorageInfo(storageID),
-node);
+storage, node);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6243eabb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index c1a7ebb..011baa1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -18,15 +18,22 @@
 
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Random;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -36,6 +43,8 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import