HDFS-8294. Erasure Coding: Fix Findbug warnings present in erasure coding. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7af05a3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7af05a3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7af05a3d

Branch: refs/heads/HDFS-7285
Commit: 7af05a3db4c731eca5e674b3d9e3b7abbf82ccd5
Parents: e53fa76
Author: Zhe Zhang <zhezh...@cloudera.com>
Authored: Thu May 21 14:40:14 2015 -0700
Committer: Zhe Zhang <zhezh...@cloudera.com>
Committed: Tue May 26 12:02:34 2015 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt                |  3 +++
 .../org/apache/hadoop/hdfs/DFSStripedOutputStream.java  | 12 ++++++------
 .../BlockInfoStripedUnderConstruction.java              |  3 +++
 .../datanode/erasurecode/ErasureCodingWorker.java       |  4 ++--
 .../hdfs/server/namenode/ErasureCodingZoneManager.java  |  4 ++--
 .../org/apache/hadoop/hdfs/util/StripedBlockUtil.java   |  6 +++---
 6 files changed, 19 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7af05a3d/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 3bdff6f..c986f19 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -247,3 +247,6 @@
 
     HDFS-8186. Erasure coding: Make block placement policy for EC file configurable.
     (Walter Su via zhz)
+
+    HDFS-8294. Erasure Coding: Fix Findbug warnings present in erasure coding.
+    (Rakesh R via zhz)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7af05a3d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 8eed6ad..515ce0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -276,11 +276,11 @@ public class DFSStripedOutputStream extends DFSOutputStream {
     return getCurrentStreamer().getIndex();
   }
 
-  StripedDataStreamer getCurrentStreamer() {
+  private synchronized StripedDataStreamer getCurrentStreamer() {
     return (StripedDataStreamer)streamer;
   }
 
-  private StripedDataStreamer setCurrentStreamer(int i) {
+  private synchronized StripedDataStreamer setCurrentStreamer(int i) {
     streamer = streamers.get(i);
     return getCurrentStreamer();
   }
@@ -344,8 +344,8 @@ public class DFSStripedOutputStream extends DFSOutputStream {
     int ckOff = 0;
     while (byteBuffer.remaining() > 0) {
       DFSPacket p = createPacket(packetSize, chunksPerPacket,
-          streamer.getBytesCurBlock(),
-          streamer.getAndIncCurrentSeqno(), false);
+          getCurrentStreamer().getBytesCurBlock(),
+          getCurrentStreamer().getAndIncCurrentSeqno(), false);
       int maxBytesToPacket = p.getMaxChunks() * bytesPerChecksum;
       int toWrite = byteBuffer.remaining() > maxBytesToPacket ?
           maxBytesToPacket: byteBuffer.remaining();
@@ -353,7 +353,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
       p.writeChecksum(checksumBuf, ckOff, ckLen);
       ckOff += ckLen;
       p.writeData(byteBuffer, toWrite);
-      streamer.incBytesCurBlock(toWrite);
+      getCurrentStreamer().incBytesCurBlock(toWrite);
       packets.add(p);
     }
     return packets;
@@ -529,7 +529,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
     if (!current.isFailed()) {
       try {
         for (DFSPacket p : generatePackets(buffer, checksumBuf)) {
-          streamer.waitAndQueuePacket(p);
+          getCurrentStreamer().waitAndQueuePacket(p);
         }
         endBlock();
       } catch(Exception e) {

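The DFSStripedOutputStream hunks look like the standard cure for FindBugs' inconsistent-synchronization family of warnings (e.g. IS2_INCONSISTENT_SYNC): when a mutable field is lock-guarded on some paths, every read and write should go through synchronized accessors. A minimal sketch of that pattern, with hypothetical names (not the patch itself):

  class CurrentStreamerHolder {
    interface Streamer { void queuePacket(String packet); }

    private Streamer current;  // shared mutable state

    // All access to 'current' holds the object lock, so no code path sees
    // the field without synchronization.
    private synchronized Streamer getCurrent() { return current; }
    private synchronized void setCurrent(Streamer s) { current = s; }

    void queue(String packet) {
      getCurrent().queuePacket(packet);  // never read the field directly here
    }
  }
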
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7af05a3d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
index 40f880f..76d7920 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
@@ -189,6 +189,9 @@ public class BlockInfoStripedUnderConstruction extends BlockInfoStriped
       NameNode.blockStateChangeLog.warn("BLOCK*" +
           " BlockInfoStripedUnderConstruction.initLeaseRecovery:" +
           " No blocks found, lease removed.");
+      // sets primary node index and return.
+      primaryNodeIndex = -1;
+      return;
     }
     boolean allLiveReplicasTriedAsPrimary = true;
     for (ReplicaUnderConstruction replica : replicas) {

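The BlockInfoStripedUnderConstruction change adds a guard clause: when no replicas are known, reset the primary index and return instead of falling through to the replica-selection loop. A generic sketch of that early-return shape, with hypothetical names (not the patch itself):

  import java.util.List;

  class LeaseRecoveryExample {
    private int primaryNodeIndex = -1;

    void initializeRecovery(List<String> replicas) {
      if (replicas == null || replicas.isEmpty()) {
        System.err.println("No blocks found, lease removed.");
        primaryNodeIndex = -1;  // reset state before bailing out
        return;                 // nothing below should run with no replicas
      }
      for (String replica : replicas) {
        // ... choose a primary replica for recovery ...
      }
    }
  }
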
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7af05a3d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index d227de8..ded51eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -251,7 +251,7 @@ public final class ErasureCodingWorker {
     private final long[] blockOffset4Targets;
     private final long[] seqNo4Targets;
 
-    private final int WRITE_PACKET_SIZE = 64 * 1024;
+    private final static int WRITE_PACKET_SIZE = 64 * 1024;
     private DataChecksum checksum;
     private int maxChunksPerPacket;
     private byte[] packetBuf;
@@ -904,7 +904,7 @@ public final class ErasureCodingWorker {
     }
   }
 
-  private class StripedReader {
+  private static class StripedReader {
     private final short index;
     private BlockReader blockReader;
     private ByteBuffer buffer;

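Both ErasureCodingWorker hunks are "make it static" fixes: a constant that never depends on instance state becomes static final, and a nested class that never touches its enclosing instance becomes a static nested class (the kind of issue FindBugs reports as SIC_INNER_SHOULD_BE_STATIC). A sketch of both patterns, with hypothetical names (not the patch itself):

  public final class WorkerSketch {
    // The constant is identical for every instance, so keep one copy per class.
    private static final int WRITE_PACKET_SIZE = 64 * 1024;

    // A non-static inner class carries a hidden reference to the outer
    // instance; declaring it static drops that reference when it is unneeded.
    private static class Reader {
      private final short index;
      Reader(short index) { this.index = index; }
      short getIndex() { return index; }
    }
  }
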
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7af05a3d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
index 371b8ac..89fecc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
@@ -92,8 +92,8 @@ public class ErasureCodingZoneManager {
           String schemaName = WritableUtils.readString(dIn);
           ECSchema schema = dir.getFSNamesystem().getECSchemaManager()
               .getSchema(schemaName);
-          return new ErasureCodingZoneInfo(inode.getFullPathName(), schema,
-              cellSize);
+          return new ErasureCodingZoneInfo(dir.getInode(inode.getId())
+              .getFullPathName(), schema, cellSize);
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7af05a3d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 0b09f37..38dc61a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -105,7 +105,7 @@ public class StripedBlockUtil {
     final ExtendedBlock blk = constructInternalBlock(
         bg.getBlock(), cellSize, dataBlkNum, idxInBlockGroup);
 
-    final long offset = bg.getStartOffset() + idxInBlockGroup * cellSize;
+    final long offset = bg.getStartOffset() + idxInBlockGroup * (long) cellSize;
     if (idxInReturnedLocs < bg.getLocations().length) {
       return new LocatedBlock(blk,
           new DatanodeInfo[]{bg.getLocations()[idxInReturnedLocs]},
@@ -406,11 +406,11 @@ public class StripedBlockUtil {
     long earliestStart = startOffsets[firstCell.idxInStripe];
     for (int i = 1; i < dataBlkNum; i++) {
       int idx = firstCellIdxInBG + i;
-      if (idx * cellSize >= blockGroup.getBlockSize()) {
+      if (idx * (long) cellSize >= blockGroup.getBlockSize()) {
         break;
       }
       StripingCell cell = new StripingCell(ecSchema, cellSize, idx);
-      startOffsets[cell.idxInStripe] = cell.idxInInternalBlk * cellSize;
+      startOffsets[cell.idxInStripe] = cell.idxInInternalBlk * (long) cellSize;
       if (startOffsets[cell.idxInStripe] < earliestStart) {
         earliestStart = startOffsets[cell.idxInStripe];
       }

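The StripedBlockUtil hunks match the standard fix for int-multiplication overflow (what FindBugs calls ICAST_INTEGER_MULTIPLY_CAST_TO_LONG): with two int operands the product is computed in 32-bit arithmetic and can overflow before it is widened to long, so one operand is cast to long first. A small demonstration with hypothetical values (not the patch itself):

  public class IntMultiplyDemo {
    public static void main(String[] args) {
      int idxInBlockGroup = 40_000;
      int cellSize = 64 * 1024;                       // 65536

      long wrong = idxInBlockGroup * cellSize;        // int overflow, then widened
      long right = idxInBlockGroup * (long) cellSize; // widened before multiply

      System.out.println(wrong);   // negative value from 32-bit wrap-around
      System.out.println(right);   // 2621440000
    }
  }
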