http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index ae0a3f6..d16d8a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -207,34 +207,34 @@ public class PBHelperClient {
   public static ExtendedBlockProto convert(final ExtendedBlock b) {
     if (b == null) return null;
     return ExtendedBlockProto.newBuilder().
-      setPoolId(b.getBlockPoolId()).
-      setBlockId(b.getBlockId()).
-      setNumBytes(b.getNumBytes()).
-      setGenerationStamp(b.getGenerationStamp()).
-      build();
+        setPoolId(b.getBlockPoolId()).
+        setBlockId(b.getBlockId()).
+        setNumBytes(b.getNumBytes()).
+        setGenerationStamp(b.getGenerationStamp()).
+        build();
   }
 
   public static TokenProto convert(Token<?> tok) {
     return TokenProto.newBuilder().
-      setIdentifier(ByteString.copyFrom(tok.getIdentifier())).
-      setPassword(ByteString.copyFrom(tok.getPassword())).
-      setKind(tok.getKind().toString()).
-      setService(tok.getService().toString()).build();
+        setIdentifier(ByteString.copyFrom(tok.getIdentifier())).
+        setPassword(ByteString.copyFrom(tok.getPassword())).
+        setKind(tok.getKind().toString()).
+        setService(tok.getService().toString()).build();
   }
 
   public static ShortCircuitShmIdProto convert(ShmId shmId) {
     return ShortCircuitShmIdProto.newBuilder().
-      setHi(shmId.getHi()).
-      setLo(shmId.getLo()).
-      build();
+        setHi(shmId.getHi()).
+        setLo(shmId.getLo()).
+        build();
 
   }
 
   public static ShortCircuitShmSlotProto convert(SlotId slotId) {
     return ShortCircuitShmSlotProto.newBuilder().
-      setShmId(convert(slotId.getShmId())).
-      setSlotIdx(slotId.getSlotIdx()).
-      build();
+        setShmId(convert(slotId.getShmId())).
+        setSlotIdx(slotId.getSlotIdx()).
+        build();
   }
 
   public static DatanodeIDProto convert(DatanodeID dn) {
@@ -242,23 +242,24 @@ public class PBHelperClient {
     // which is the same as the DatanodeUuid. Since StorageID is a required
     // field we pass the empty string if the DatanodeUuid is not yet known.
     return DatanodeIDProto.newBuilder()
-      .setIpAddr(dn.getIpAddr())
-      .setHostName(dn.getHostName())
-      .setXferPort(dn.getXferPort())
-      .setDatanodeUuid(dn.getDatanodeUuid() != null ? dn.getDatanodeUuid() : "")
-      .setInfoPort(dn.getInfoPort())
-      .setInfoSecurePort(dn.getInfoSecurePort())
-      .setIpcPort(dn.getIpcPort()).build();
+        .setIpAddr(dn.getIpAddr())
+        .setHostName(dn.getHostName())
+        .setXferPort(dn.getXferPort())
+        .setDatanodeUuid(dn.getDatanodeUuid() != null ?
+            dn.getDatanodeUuid() : "")
+        .setInfoPort(dn.getInfoPort())
+        .setInfoSecurePort(dn.getInfoSecurePort())
+        .setIpcPort(dn.getIpcPort()).build();
   }
 
   public static DatanodeInfoProto.AdminState convert(
-    final DatanodeInfo.AdminStates inAs) {
+      final DatanodeInfo.AdminStates inAs) {
     switch (inAs) {
-      case NORMAL: return  DatanodeInfoProto.AdminState.NORMAL;
-      case DECOMMISSION_INPROGRESS:
-        return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS;
-      case DECOMMISSIONED: return DatanodeInfoProto.AdminState.DECOMMISSIONED;
-      default: return DatanodeInfoProto.AdminState.NORMAL;
+    case NORMAL: return  DatanodeInfoProto.AdminState.NORMAL;
+    case DECOMMISSION_INPROGRESS:
+      return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS;
+    case DECOMMISSIONED: return DatanodeInfoProto.AdminState.DECOMMISSIONED;
+    default: return DatanodeInfoProto.AdminState.NORMAL;
     }
   }
 
@@ -271,23 +272,23 @@ public class PBHelperClient {
       builder.setUpgradeDomain(info.getUpgradeDomain());
     }
     builder
-      .setId(convert((DatanodeID) info))
-      .setCapacity(info.getCapacity())
-      .setDfsUsed(info.getDfsUsed())
-      .setRemaining(info.getRemaining())
-      .setBlockPoolUsed(info.getBlockPoolUsed())
-      .setCacheCapacity(info.getCacheCapacity())
-      .setCacheUsed(info.getCacheUsed())
-      .setLastUpdate(info.getLastUpdate())
-      .setLastUpdateMonotonic(info.getLastUpdateMonotonic())
-      .setXceiverCount(info.getXceiverCount())
-      .setAdminState(convert(info.getAdminState()))
-      .build();
+        .setId(convert((DatanodeID) info))
+        .setCapacity(info.getCapacity())
+        .setDfsUsed(info.getDfsUsed())
+        .setRemaining(info.getRemaining())
+        .setBlockPoolUsed(info.getBlockPoolUsed())
+        .setCacheCapacity(info.getCacheCapacity())
+        .setCacheUsed(info.getCacheUsed())
+        .setLastUpdate(info.getLastUpdate())
+        .setLastUpdateMonotonic(info.getLastUpdateMonotonic())
+        .setXceiverCount(info.getXceiverCount())
+        .setAdminState(convert(info.getAdminState()))
+        .build();
     return builder.build();
   }
 
   public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
-    DatanodeInfo[] dnInfos) {
+      DatanodeInfo[] dnInfos) {
     return convert(dnInfos, 0);
   }
 
@@ -296,11 +297,11 @@ public class PBHelperClient {
    * {@code startIdx}.
    */
   public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
-    DatanodeInfo[] dnInfos, int startIdx) {
+      DatanodeInfo[] dnInfos, int startIdx) {
     if (dnInfos == null)
       return null;
     ArrayList<HdfsProtos.DatanodeInfoProto> protos = Lists
-      .newArrayListWithCapacity(dnInfos.length);
+        .newArrayListWithCapacity(dnInfos.length);
     for (int i = startIdx; i < dnInfos.length; i++) {
       protos.add(convert(dnInfos[i]));
     }
@@ -337,48 +338,48 @@ public class PBHelperClient {
 
   public static StorageTypeProto convertStorageType(StorageType type) {
     switch(type) {
-      case DISK:
-        return StorageTypeProto.DISK;
-      case SSD:
-        return StorageTypeProto.SSD;
-      case ARCHIVE:
-        return StorageTypeProto.ARCHIVE;
-      case RAM_DISK:
-        return StorageTypeProto.RAM_DISK;
-      default:
-        throw new IllegalStateException(
+    case DISK:
+      return StorageTypeProto.DISK;
+    case SSD:
+      return StorageTypeProto.SSD;
+    case ARCHIVE:
+      return StorageTypeProto.ARCHIVE;
+    case RAM_DISK:
+      return StorageTypeProto.RAM_DISK;
+    default:
+      throw new IllegalStateException(
           "BUG: StorageType not found, type=" + type);
     }
   }
 
   public static StorageType convertStorageType(StorageTypeProto type) {
     switch(type) {
-      case DISK:
-        return StorageType.DISK;
-      case SSD:
-        return StorageType.SSD;
-      case ARCHIVE:
-        return StorageType.ARCHIVE;
-      case RAM_DISK:
-        return StorageType.RAM_DISK;
-      default:
-        throw new IllegalStateException(
+    case DISK:
+      return StorageType.DISK;
+    case SSD:
+      return StorageType.SSD;
+    case ARCHIVE:
+      return StorageType.ARCHIVE;
+    case RAM_DISK:
+      return StorageType.RAM_DISK;
+    default:
+      throw new IllegalStateException(
           "BUG: StorageTypeProto not found, type=" + type);
     }
   }
 
   public static List<StorageTypeProto> convertStorageTypes(
-    StorageType[] types) {
+      StorageType[] types) {
     return convertStorageTypes(types, 0);
   }
 
   public static List<StorageTypeProto> convertStorageTypes(
-    StorageType[] types, int startIdx) {
+      StorageType[] types, int startIdx) {
     if (types == null) {
       return null;
     }
     final List<StorageTypeProto> protos = new ArrayList<>(
-      types.length);
+        types.length);
     for (int i = startIdx; i < types.length; ++i) {
       protos.add(convertStorageType(types[i]));
     }
@@ -386,7 +387,7 @@ public class PBHelperClient {
   }
 
   public static InputStream vintPrefixed(final InputStream input)
-    throws IOException {
+      throws IOException {
     final int firstByte = input.read();
     if (firstByte == -1) {
       throw new EOFException("Premature EOF: no length prefix available");
@@ -438,8 +439,8 @@ public class PBHelperClient {
 
   public static HdfsProtos.CipherOptionProto convert(CipherOption option) {
     if (option != null) {
-      HdfsProtos.CipherOptionProto.Builder builder = HdfsProtos.CipherOptionProto.
-          newBuilder();
+      HdfsProtos.CipherOptionProto.Builder builder =
+          HdfsProtos.CipherOptionProto.newBuilder();
       if (option.getCipherSuite() != null) {
         builder.setSuite(convert(option.getCipherSuite()));
       }
@@ -514,7 +515,8 @@ public class PBHelperClient {
       storageIDs = null;
     } else {
       Preconditions.checkState(storageIDsCount == locs.size());
-      storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
+      storageIDs = proto.getStorageIDsList()
+          .toArray(new String[storageIDsCount]);
     }
 
     // Set values from the isCached list, re-using references from loc
@@ -550,7 +552,7 @@ public class PBHelperClient {
       List<StorageTypeProto> storageTypesList, int expectedSize) {
     final StorageType[] storageTypes = new StorageType[expectedSize];
     if (storageTypesList.size() != expectedSize) {
-     // missing storage types
+      // missing storage types
       Preconditions.checkState(storageTypesList.isEmpty());
       Arrays.fill(storageTypes, StorageType.DEFAULT);
     } else {
@@ -570,9 +572,9 @@ public class PBHelperClient {
 
   // DatanodeId
   public static DatanodeID convert(DatanodeIDProto dn) {
-    return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getDatanodeUuid(),
-        dn.getXferPort(), dn.getInfoPort(), dn.hasInfoSecurePort() ? dn
-        .getInfoSecurePort() : 0, dn.getIpcPort());
+    return new DatanodeID(dn.getIpAddr(), dn.getHostName(),
+        dn.getDatanodeUuid(), dn.getXferPort(), dn.getInfoPort(),
+        dn.hasInfoSecurePort() ? dn.getInfoSecurePort() : 0, dn.getIpcPort());
   }
 
   public static AdminStates convert(AdminState adminState) {
@@ -611,8 +613,8 @@ public class PBHelperClient {
     return policies;
   }
 
-  public static EventBatchList convert(GetEditsFromTxidResponseProto resp) throws
-    IOException {
+  public static EventBatchList convert(GetEditsFromTxidResponseProto resp)
+      throws IOException {
     final InotifyProtos.EventsListProto list = resp.getEventsList();
     final long firstTxid = list.getFirstTxid();
     final long lastTxid = list.getLastTxid();
@@ -631,82 +633,82 @@ public class PBHelperClient {
       List<Event> events = Lists.newArrayList();
       for (InotifyProtos.EventProto p : bp.getEventsList()) {
         switch (p.getType()) {
-          case EVENT_CLOSE:
-            InotifyProtos.CloseEventProto close =
-                InotifyProtos.CloseEventProto.parseFrom(p.getContents());
-            events.add(new Event.CloseEvent(close.getPath(),
-                close.getFileSize(), close.getTimestamp()));
-            break;
-          case EVENT_CREATE:
-            InotifyProtos.CreateEventProto create =
-                InotifyProtos.CreateEventProto.parseFrom(p.getContents());
-            events.add(new Event.CreateEvent.Builder()
-                .iNodeType(createTypeConvert(create.getType()))
-                .path(create.getPath())
-                .ctime(create.getCtime())
-                .ownerName(create.getOwnerName())
-                .groupName(create.getGroupName())
-                .perms(convert(create.getPerms()))
-                .replication(create.getReplication())
-                .symlinkTarget(create.getSymlinkTarget().isEmpty() ? null :
-                    create.getSymlinkTarget())
-                .defaultBlockSize(create.getDefaultBlockSize())
-                .overwrite(create.getOverwrite()).build());
-            break;
-          case EVENT_METADATA:
-            InotifyProtos.MetadataUpdateEventProto meta =
-                InotifyProtos.MetadataUpdateEventProto.parseFrom(p.getContents());
-            events.add(new Event.MetadataUpdateEvent.Builder()
-                .path(meta.getPath())
-                .metadataType(metadataUpdateTypeConvert(meta.getType()))
-                .mtime(meta.getMtime())
-                .atime(meta.getAtime())
-                .replication(meta.getReplication())
-                .ownerName(
-                    meta.getOwnerName().isEmpty() ? null : meta.getOwnerName())
-                .groupName(
-                    meta.getGroupName().isEmpty() ? null : meta.getGroupName())
-                .perms(meta.hasPerms() ? convert(meta.getPerms()) : null)
-                .acls(meta.getAclsList().isEmpty() ? null : convertAclEntry(
-                    meta.getAclsList()))
-                .xAttrs(meta.getXAttrsList().isEmpty() ? null : convertXAttrs(
-                    meta.getXAttrsList()))
-                .xAttrsRemoved(meta.getXAttrsRemoved())
-                .build());
-            break;
-          case EVENT_RENAME:
-            InotifyProtos.RenameEventProto rename =
-                InotifyProtos.RenameEventProto.parseFrom(p.getContents());
-            events.add(new Event.RenameEvent.Builder()
-                  .srcPath(rename.getSrcPath())
-                  .dstPath(rename.getDestPath())
-                  .timestamp(rename.getTimestamp())
-                  .build());
-            break;
-          case EVENT_APPEND:
-            InotifyProtos.AppendEventProto append =
-                InotifyProtos.AppendEventProto.parseFrom(p.getContents());
-            events.add(new Event.AppendEvent.Builder().path(append.getPath())
-                .newBlock(append.hasNewBlock() && append.getNewBlock())
-                .build());
-            break;
-          case EVENT_UNLINK:
-            InotifyProtos.UnlinkEventProto unlink =
-                InotifyProtos.UnlinkEventProto.parseFrom(p.getContents());
-            events.add(new Event.UnlinkEvent.Builder()
-                  .path(unlink.getPath())
-                  .timestamp(unlink.getTimestamp())
-                  .build());
-            break;
-          case EVENT_TRUNCATE:
-            InotifyProtos.TruncateEventProto truncate =
-                InotifyProtos.TruncateEventProto.parseFrom(p.getContents());
-            events.add(new Event.TruncateEvent(truncate.getPath(),
-                truncate.getFileSize(), truncate.getTimestamp()));
-            break;
-          default:
-            throw new RuntimeException("Unexpected inotify event type: " +
-                p.getType());
+        case EVENT_CLOSE:
+          InotifyProtos.CloseEventProto close =
+              InotifyProtos.CloseEventProto.parseFrom(p.getContents());
+          events.add(new Event.CloseEvent(close.getPath(),
+              close.getFileSize(), close.getTimestamp()));
+          break;
+        case EVENT_CREATE:
+          InotifyProtos.CreateEventProto create =
+              InotifyProtos.CreateEventProto.parseFrom(p.getContents());
+          events.add(new Event.CreateEvent.Builder()
+              .iNodeType(createTypeConvert(create.getType()))
+              .path(create.getPath())
+              .ctime(create.getCtime())
+              .ownerName(create.getOwnerName())
+              .groupName(create.getGroupName())
+              .perms(convert(create.getPerms()))
+              .replication(create.getReplication())
+              .symlinkTarget(create.getSymlinkTarget().isEmpty() ? null :
+                  create.getSymlinkTarget())
+              .defaultBlockSize(create.getDefaultBlockSize())
+              .overwrite(create.getOverwrite()).build());
+          break;
+        case EVENT_METADATA:
+          InotifyProtos.MetadataUpdateEventProto meta =
+              InotifyProtos.MetadataUpdateEventProto.parseFrom(p.getContents());
+          events.add(new Event.MetadataUpdateEvent.Builder()
+              .path(meta.getPath())
+              .metadataType(metadataUpdateTypeConvert(meta.getType()))
+              .mtime(meta.getMtime())
+              .atime(meta.getAtime())
+              .replication(meta.getReplication())
+              .ownerName(
+                  meta.getOwnerName().isEmpty() ? null : meta.getOwnerName())
+              .groupName(
+                  meta.getGroupName().isEmpty() ? null : meta.getGroupName())
+              .perms(meta.hasPerms() ? convert(meta.getPerms()) : null)
+              .acls(meta.getAclsList().isEmpty() ? null : convertAclEntry(
+                  meta.getAclsList()))
+              .xAttrs(meta.getXAttrsList().isEmpty() ? null : convertXAttrs(
+                  meta.getXAttrsList()))
+              .xAttrsRemoved(meta.getXAttrsRemoved())
+              .build());
+          break;
+        case EVENT_RENAME:
+          InotifyProtos.RenameEventProto rename =
+              InotifyProtos.RenameEventProto.parseFrom(p.getContents());
+          events.add(new Event.RenameEvent.Builder()
+              .srcPath(rename.getSrcPath())
+              .dstPath(rename.getDestPath())
+              .timestamp(rename.getTimestamp())
+              .build());
+          break;
+        case EVENT_APPEND:
+          InotifyProtos.AppendEventProto append =
+              InotifyProtos.AppendEventProto.parseFrom(p.getContents());
+          events.add(new Event.AppendEvent.Builder().path(append.getPath())
+              .newBlock(append.hasNewBlock() && append.getNewBlock())
+              .build());
+          break;
+        case EVENT_UNLINK:
+          InotifyProtos.UnlinkEventProto unlink =
+              InotifyProtos.UnlinkEventProto.parseFrom(p.getContents());
+          events.add(new Event.UnlinkEvent.Builder()
+              .path(unlink.getPath())
+              .timestamp(unlink.getTimestamp())
+              .build());
+          break;
+        case EVENT_TRUNCATE:
+          InotifyProtos.TruncateEventProto truncate =
+              InotifyProtos.TruncateEventProto.parseFrom(p.getContents());
+          events.add(new Event.TruncateEvent(truncate.getPath(),
+              truncate.getFileSize(), truncate.getTimestamp()));
+          break;
+        default:
+          throw new RuntimeException("Unexpected inotify event type: " +
+              p.getType());
         }
       }
       batches.add(new EventBatch(txid, events.toArray(new Event[0])));
@@ -878,7 +880,7 @@ public class PBHelperClient {
   }
 
   static InotifyProtos.INodeType createTypeConvert(Event.CreateEvent.INodeType
-                                                       type) {
+      type) {
     switch (type) {
     case DIRECTORY:
       return InotifyProtos.INodeType.I_TYPE_DIRECTORY;
@@ -1090,7 +1092,7 @@ public class PBHelperClient {
     String poolName = checkNotNull(proto.getPoolName());
     CachePoolInfo info = new CachePoolInfo(poolName);
     if (proto.hasOwnerName()) {
-        info.setOwnerName(proto.getOwnerName());
+      info.setOwnerName(proto.getOwnerName());
     }
     if (proto.hasGroupName()) {
       info.setGroupName(proto.getGroupName());
@@ -1138,8 +1140,7 @@ public class PBHelperClient {
     return builder.build();
   }
 
-  public static CacheDirectiveInfoProto convert
-      (CacheDirectiveInfo info) {
+  public static CacheDirectiveInfoProto convert(CacheDirectiveInfo info) {
     CacheDirectiveInfoProto.Builder builder =
         CacheDirectiveInfoProto.newBuilder();
     if (info.getId() != null) {
@@ -1184,10 +1185,8 @@ public class PBHelperClient {
     return builder.build();
   }
 
-  public static CacheDirectiveInfo convert
-      (CacheDirectiveInfoProto proto) {
-    CacheDirectiveInfo.Builder builder =
-        new CacheDirectiveInfo.Builder();
+  public static CacheDirectiveInfo convert(CacheDirectiveInfoProto proto) {
+    CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder();
     if (proto.hasId()) {
       builder.setId(proto.getId());
     }
@@ -1223,7 +1222,8 @@ public class PBHelperClient {
     return value;
   }
 
-  public static SnapshotDiffReport convert(SnapshotDiffReportProto reportProto) {
+  public static SnapshotDiffReport convert(
+      SnapshotDiffReportProto reportProto) {
     if (reportProto == null) {
       return null;
     }
@@ -1442,8 +1442,7 @@ public class PBHelperClient {
     }
   }
 
-  public static SafeModeActionProto convert(
-      SafeModeAction a) {
+  public static SafeModeActionProto convert(SafeModeAction a) {
     switch (a) {
     case SAFEMODE_LEAVE:
       return SafeModeActionProto.SAFEMODE_LEAVE;
@@ -1469,16 +1468,18 @@ public class PBHelperClient {
     result[ClientProtocol.GET_STATS_CAPACITY_IDX] = res.getCapacity();
     result[ClientProtocol.GET_STATS_USED_IDX] = res.getUsed();
     result[ClientProtocol.GET_STATS_REMAINING_IDX] = res.getRemaining();
-    result[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] = res.getUnderReplicated();
-    result[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] = res.getCorruptBlocks();
-    result[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] = res.getMissingBlocks();
+    result[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] =
+        res.getUnderReplicated();
+    result[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] =
+        res.getCorruptBlocks();
+    result[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] =
+        res.getMissingBlocks();
     result[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX] =
         res.getMissingReplOneBlocks();
     return result;
   }
 
-  public static DatanodeReportTypeProto
-    convert(DatanodeReportType t) {
+  public static DatanodeReportTypeProto convert(DatanodeReportType t) {
     switch (t) {
     case ALL: return DatanodeReportTypeProto.ALL;
     case LIVE: return DatanodeReportTypeProto.LIVE;
@@ -1636,8 +1637,8 @@ public class PBHelperClient {
       DatanodeStorageReport[] reports) {
     final List<DatanodeStorageReportProto> protos
         = new ArrayList<>(reports.length);
-    for(int i = 0; i < reports.length; i++) {
-      protos.add(convertDatanodeStorageReport(reports[i]));
+    for (DatanodeStorageReport report : reports) {
+      protos.add(convertDatanodeStorageReport(report));
     }
     return protos;
   }
@@ -1682,20 +1683,20 @@ public class PBHelperClient {
   public static FsServerDefaultsProto convert(FsServerDefaults fs) {
     if (fs == null) return null;
     return FsServerDefaultsProto.newBuilder().
-      setBlockSize(fs.getBlockSize()).
-      setBytesPerChecksum(fs.getBytesPerChecksum()).
-      setWritePacketSize(fs.getWritePacketSize())
-      .setReplication(fs.getReplication())
-      .setFileBufferSize(fs.getFileBufferSize())
-      .setEncryptDataTransfer(fs.getEncryptDataTransfer())
-      .setTrashInterval(fs.getTrashInterval())
-      .setChecksumType(convert(fs.getChecksumType()))
-      .build();
+        setBlockSize(fs.getBlockSize()).
+        setBytesPerChecksum(fs.getBytesPerChecksum()).
+        setWritePacketSize(fs.getWritePacketSize())
+        .setReplication(fs.getReplication())
+        .setFileBufferSize(fs.getFileBufferSize())
+        .setEncryptDataTransfer(fs.getEncryptDataTransfer())
+        .setTrashInterval(fs.getTrashInterval())
+        .setChecksumType(convert(fs.getChecksumType()))
+        .build();
   }
 
   public static EnumSetWritable<CreateFlag> convertCreateFlag(int flag) {
     EnumSet<CreateFlag> result =
-       EnumSet.noneOf(CreateFlag.class);
+        EnumSet.noneOf(CreateFlag.class);
     if ((flag & CreateFlagProto.APPEND_VALUE) == CreateFlagProto.APPEND_VALUE) {
       result.add(CreateFlag.APPEND);
     }
@@ -1714,7 +1715,7 @@ public class PBHelperClient {
         == CreateFlagProto.NEW_BLOCK_VALUE) {
       result.add(CreateFlag.NEW_BLOCK);
     }
-    return new EnumSetWritable<CreateFlag>(result, CreateFlag.class);
+    return new EnumSetWritable<>(result, CreateFlag.class);
   }
 
   public static EnumSet<CacheFlag> convertCacheFlags(int flags) {
@@ -1736,20 +1737,20 @@ public class PBHelperClient {
     }
 
     HdfsFileStatusProto.Builder builder =
-     HdfsFileStatusProto.newBuilder().
-      setLength(fs.getLen()).
-      setFileType(fType).
-      setBlockReplication(fs.getReplication()).
-      setBlocksize(fs.getBlockSize()).
-      setModificationTime(fs.getModificationTime()).
-      setAccessTime(fs.getAccessTime()).
-      setPermission(convert(fs.getPermission())).
-      setOwner(fs.getOwner()).
-      setGroup(fs.getGroup()).
-      setFileId(fs.getFileId()).
-      setChildrenNum(fs.getChildrenNum()).
-      setPath(ByteString.copyFrom(fs.getLocalNameInBytes())).
-      setStoragePolicy(fs.getStoragePolicy());
+        HdfsFileStatusProto.newBuilder().
+            setLength(fs.getLen()).
+            setFileType(fType).
+            setBlockReplication(fs.getReplication()).
+            setBlocksize(fs.getBlockSize()).
+            setModificationTime(fs.getModificationTime()).
+            setAccessTime(fs.getAccessTime()).
+            setPermission(convert(fs.getPermission())).
+            setOwner(fs.getOwner()).
+            setGroup(fs.getGroup()).
+            setFileId(fs.getFileId()).
+            setChildrenNum(fs.getChildrenNum()).
+            setPath(ByteString.copyFrom(fs.getLocalNameInBytes())).
+            setStoragePolicy(fs.getStoragePolicy());
     if (fs.isSymlink())  {
       builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
     }
@@ -1779,9 +1780,11 @@ public class PBHelperClient {
     HdfsFileStatusProto fs = convert(status.getDirStatus());
     SnapshottableDirectoryStatusProto.Builder builder =
         SnapshottableDirectoryStatusProto
-        .newBuilder().setSnapshotNumber(snapshotNumber)
-        .setSnapshotQuota(snapshotQuota).setParentFullpath(parentFullPathBytes)
-        .setDirStatus(fs);
+            .newBuilder()
+            .setSnapshotNumber(snapshotNumber)
+            .setSnapshotQuota(snapshotQuota)
+            .setParentFullpath(parentFullPathBytes)
+            .setDirStatus(fs);
     return builder.build();
   }
 
@@ -1816,14 +1819,15 @@ public class PBHelperClient {
       result.setRemaining(fsStats[ClientProtocol.GET_STATS_REMAINING_IDX]);
     if (fsStats.length >= ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX + 1)
       result.setUnderReplicated(
-              fsStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
+          fsStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
     if (fsStats.length >= ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX + 1)
       result.setCorruptBlocks(
           fsStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]);
     if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX + 1)
       result.setMissingBlocks(
           fsStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]);
-    if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX + 1)
+    if (fsStats.length >=
+        ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX + 1)
       result.setMissingReplOneBlocks(
           fsStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX]);
     return result.build();
@@ -1901,7 +1905,7 @@ public class PBHelperClient {
   public static ContentSummaryProto convert(ContentSummary cs) {
     if (cs == null) return null;
     ContentSummaryProto.Builder builder = ContentSummaryProto.newBuilder();
-        builder.setLength(cs.getLength()).
+    builder.setLength(cs.getLength()).
         setFileCount(cs.getFileCount()).
         setDirectoryCount(cs.getDirectoryCount()).
         setQuota(cs.getQuota()).
@@ -1951,11 +1955,11 @@ public class PBHelperClient {
     return builder.build();
   }
 
-  public static List<StorageReportProto> convertStorageReports(StorageReport[] storages) {
-    final List<StorageReportProto> protos = new ArrayList<StorageReportProto>(
-        storages.length);
-    for(int i = 0; i < storages.length; i++) {
-      protos.add(convert(storages[i]));
+  public static List<StorageReportProto> convertStorageReports(
+      StorageReport[] storages) {
+    final List<StorageReportProto> protos = new ArrayList<>(storages.length);
+    for (StorageReport storage : storages) {
+      protos.add(convert(storage));
     }
     return protos;
   }
@@ -1978,17 +1982,16 @@ public class PBHelperClient {
     if (entry == null) {
       return null;
     }
-    ByteString sourcePath = ByteString
-        .copyFrom(entry.getSourcePath() == null ? DFSUtilClient.EMPTY_BYTES : entry
-            .getSourcePath());
+    ByteString sourcePath = ByteString.copyFrom(entry.getSourcePath() == null ?
+        DFSUtilClient.EMPTY_BYTES : entry.getSourcePath());
     String modification = entry.getType().getLabel();
     SnapshotDiffReportEntryProto.Builder builder = SnapshotDiffReportEntryProto
         .newBuilder().setFullpath(sourcePath)
         .setModificationLabel(modification);
     if (entry.getType() == DiffType.RENAME) {
-      ByteString targetPath = ByteString
-          .copyFrom(entry.getTargetPath() == null ? DFSUtilClient.EMPTY_BYTES : entry
-              .getTargetPath());
+      ByteString targetPath =
+          ByteString.copyFrom(entry.getTargetPath() == null ?
+              DFSUtilClient.EMPTY_BYTES : entry.getTargetPath());
       builder.setTargetPath(targetPath);
     }
     return builder.build();
@@ -2006,12 +2009,11 @@ public class PBHelperClient {
         entryProtos.add(entryProto);
     }
 
-    SnapshotDiffReportProto reportProto = SnapshotDiffReportProto.newBuilder()
+    return SnapshotDiffReportProto.newBuilder()
         .setSnapshotRoot(report.getSnapshotRoot())
         .setFromSnapshot(report.getFromSnapshot())
         .setToSnapshot(report.getLaterSnapshotName())
         .addAllDiffReportEntries(entryProtos).build();
-    return reportProto;
   }
 
   public static CacheDirectiveStatsProto convert(CacheDirectiveStats stats) {
@@ -2034,7 +2036,7 @@ public class PBHelperClient {
   }
 
   public static boolean[] convertBooleanList(
-    List<Boolean> targetPinningsList) {
+      List<Boolean> targetPinningsList) {
     final boolean[] targetPinnings = new boolean[targetPinningsList.size()];
     for (int i = 0; i < targetPinningsList.size(); i++) {
       targetPinnings[i] = targetPinningsList.get(i);
@@ -2060,7 +2062,8 @@ public class PBHelperClient {
   }
 
   public static DatanodeLocalInfoProto convert(DatanodeLocalInfo info) {
-    DatanodeLocalInfoProto.Builder builder = DatanodeLocalInfoProto.newBuilder();
+    DatanodeLocalInfoProto.Builder builder =
+        DatanodeLocalInfoProto.newBuilder();
     builder.setSoftwareVersion(info.getSoftwareVersion());
     builder.setConfigVersion(info.getConfigVersion());
     builder.setUptime(info.getUptime());
@@ -2116,9 +2119,9 @@ public class PBHelperClient {
   }
 
   public static ListXAttrsResponseProto convertListXAttrsResponse(
-    List<XAttr> names) {
+      List<XAttr> names) {
     ListXAttrsResponseProto.Builder builder =
-      ListXAttrsResponseProto.newBuilder();
+        ListXAttrsResponseProto.newBuilder();
     if (names != null) {
       builder.addAllXAttrs(convertXAttrProto(names));
     }
@@ -2140,114 +2143,115 @@ public class PBHelperClient {
         slotId.getSlotIdx());
   }
 
-  public static GetEditsFromTxidResponseProto convertEditsResponse(EventBatchList el) {
+  public static GetEditsFromTxidResponseProto convertEditsResponse(
+      EventBatchList el) {
     InotifyProtos.EventsListProto.Builder builder =
         InotifyProtos.EventsListProto.newBuilder();
     for (EventBatch b : el.getBatches()) {
       List<InotifyProtos.EventProto> events = Lists.newArrayList();
       for (Event e : b.getEvents()) {
         switch (e.getEventType()) {
-          case CLOSE:
-            Event.CloseEvent ce = (Event.CloseEvent) e;
-            events.add(InotifyProtos.EventProto.newBuilder()
-                .setType(InotifyProtos.EventType.EVENT_CLOSE)
-                .setContents(
-                    InotifyProtos.CloseEventProto.newBuilder()
-                        .setPath(ce.getPath())
-                        .setFileSize(ce.getFileSize())
-                        .setTimestamp(ce.getTimestamp()).build().toByteString()
-                ).build());
-            break;
-          case CREATE:
-            Event.CreateEvent ce2 = (Event.CreateEvent) e;
-            events.add(InotifyProtos.EventProto.newBuilder()
-                .setType(InotifyProtos.EventType.EVENT_CREATE)
-                .setContents(
-                    InotifyProtos.CreateEventProto.newBuilder()
-                        .setType(createTypeConvert(ce2.getiNodeType()))
-                        .setPath(ce2.getPath())
-                        .setCtime(ce2.getCtime())
-                        .setOwnerName(ce2.getOwnerName())
-                        .setGroupName(ce2.getGroupName())
-                        .setPerms(convert(ce2.getPerms()))
-                        .setReplication(ce2.getReplication())
-                        .setSymlinkTarget(ce2.getSymlinkTarget() == null ?
-                            "" : ce2.getSymlinkTarget())
-                        .setDefaultBlockSize(ce2.getDefaultBlockSize())
-                        .setOverwrite(ce2.getOverwrite()).build().toByteString()
-                ).build());
-            break;
-          case METADATA:
-            Event.MetadataUpdateEvent me = (Event.MetadataUpdateEvent) e;
-            InotifyProtos.MetadataUpdateEventProto.Builder metaB =
-                InotifyProtos.MetadataUpdateEventProto.newBuilder()
-                    .setPath(me.getPath())
-                    .setType(metadataUpdateTypeConvert(me.getMetadataType()))
-                    .setMtime(me.getMtime())
-                    .setAtime(me.getAtime())
-                    .setReplication(me.getReplication())
-                    .setOwnerName(me.getOwnerName() == null ? "" :
-                        me.getOwnerName())
-                    .setGroupName(me.getGroupName() == null ? "" :
-                        me.getGroupName())
-                    .addAllAcls(me.getAcls() == null ?
-                        Lists.<AclEntryProto>newArrayList() :
-                        convertAclEntryProto(me.getAcls()))
-                    .addAllXAttrs(me.getxAttrs() == null ?
-                        Lists.<XAttrProto>newArrayList() :
-                        convertXAttrProto(me.getxAttrs()))
-                    .setXAttrsRemoved(me.isxAttrsRemoved());
-            if (me.getPerms() != null) {
-              metaB.setPerms(convert(me.getPerms()));
-            }
-            events.add(InotifyProtos.EventProto.newBuilder()
-                .setType(InotifyProtos.EventType.EVENT_METADATA)
-                .setContents(metaB.build().toByteString())
-                .build());
-            break;
-          case RENAME:
-            Event.RenameEvent re = (Event.RenameEvent) e;
-            events.add(InotifyProtos.EventProto.newBuilder()
-                .setType(InotifyProtos.EventType.EVENT_RENAME)
-                .setContents(
-                    InotifyProtos.RenameEventProto.newBuilder()
-                        .setSrcPath(re.getSrcPath())
-                        .setDestPath(re.getDstPath())
-                        .setTimestamp(re.getTimestamp()).build().toByteString()
-                ).build());
-            break;
-          case APPEND:
-            Event.AppendEvent re2 = (Event.AppendEvent) e;
-            events.add(InotifyProtos.EventProto.newBuilder()
-                .setType(InotifyProtos.EventType.EVENT_APPEND)
-                .setContents(InotifyProtos.AppendEventProto.newBuilder()
-                    .setPath(re2.getPath())
-                    .setNewBlock(re2.toNewBlock()).build().toByteString())
-                .build());
-            break;
-          case UNLINK:
-            Event.UnlinkEvent ue = (Event.UnlinkEvent) e;
-            events.add(InotifyProtos.EventProto.newBuilder()
-                .setType(InotifyProtos.EventType.EVENT_UNLINK)
-                .setContents(
-                    InotifyProtos.UnlinkEventProto.newBuilder()
-                        .setPath(ue.getPath())
-                        .setTimestamp(ue.getTimestamp()).build().toByteString()
-                ).build());
-            break;
-          case TRUNCATE:
-            Event.TruncateEvent te = (Event.TruncateEvent) e;
-            events.add(InotifyProtos.EventProto.newBuilder()
-                .setType(InotifyProtos.EventType.EVENT_TRUNCATE)
-                .setContents(
-                    InotifyProtos.TruncateEventProto.newBuilder()
-                        .setPath(te.getPath())
-                        .setFileSize(te.getFileSize())
-                        .setTimestamp(te.getTimestamp()).build().toByteString()
-                ).build());
-            break;
-          default:
-            throw new RuntimeException("Unexpected inotify event: " + e);
+        case CLOSE:
+          Event.CloseEvent ce = (Event.CloseEvent) e;
+          events.add(InotifyProtos.EventProto.newBuilder()
+              .setType(InotifyProtos.EventType.EVENT_CLOSE)
+              .setContents(
+                  InotifyProtos.CloseEventProto.newBuilder()
+                      .setPath(ce.getPath())
+                      .setFileSize(ce.getFileSize())
+                      .setTimestamp(ce.getTimestamp()).build().toByteString()
+              ).build());
+          break;
+        case CREATE:
+          Event.CreateEvent ce2 = (Event.CreateEvent) e;
+          events.add(InotifyProtos.EventProto.newBuilder()
+              .setType(InotifyProtos.EventType.EVENT_CREATE)
+              .setContents(
+                  InotifyProtos.CreateEventProto.newBuilder()
+                      .setType(createTypeConvert(ce2.getiNodeType()))
+                      .setPath(ce2.getPath())
+                      .setCtime(ce2.getCtime())
+                      .setOwnerName(ce2.getOwnerName())
+                      .setGroupName(ce2.getGroupName())
+                      .setPerms(convert(ce2.getPerms()))
+                      .setReplication(ce2.getReplication())
+                      .setSymlinkTarget(ce2.getSymlinkTarget() == null ?
+                          "" : ce2.getSymlinkTarget())
+                      .setDefaultBlockSize(ce2.getDefaultBlockSize())
+                      .setOverwrite(ce2.getOverwrite()).build().toByteString()
+              ).build());
+          break;
+        case METADATA:
+          Event.MetadataUpdateEvent me = (Event.MetadataUpdateEvent) e;
+          InotifyProtos.MetadataUpdateEventProto.Builder metaB =
+              InotifyProtos.MetadataUpdateEventProto.newBuilder()
+                  .setPath(me.getPath())
+                  .setType(metadataUpdateTypeConvert(me.getMetadataType()))
+                  .setMtime(me.getMtime())
+                  .setAtime(me.getAtime())
+                  .setReplication(me.getReplication())
+                  .setOwnerName(me.getOwnerName() == null ? "" :
+                      me.getOwnerName())
+                  .setGroupName(me.getGroupName() == null ? "" :
+                      me.getGroupName())
+                  .addAllAcls(me.getAcls() == null ?
+                      Lists.<AclEntryProto>newArrayList() :
+                      convertAclEntryProto(me.getAcls()))
+                  .addAllXAttrs(me.getxAttrs() == null ?
+                      Lists.<XAttrProto>newArrayList() :
+                      convertXAttrProto(me.getxAttrs()))
+                  .setXAttrsRemoved(me.isxAttrsRemoved());
+          if (me.getPerms() != null) {
+            metaB.setPerms(convert(me.getPerms()));
+          }
+          events.add(InotifyProtos.EventProto.newBuilder()
+              .setType(InotifyProtos.EventType.EVENT_METADATA)
+              .setContents(metaB.build().toByteString())
+              .build());
+          break;
+        case RENAME:
+          Event.RenameEvent re = (Event.RenameEvent) e;
+          events.add(InotifyProtos.EventProto.newBuilder()
+              .setType(InotifyProtos.EventType.EVENT_RENAME)
+              .setContents(
+                  InotifyProtos.RenameEventProto.newBuilder()
+                      .setSrcPath(re.getSrcPath())
+                      .setDestPath(re.getDstPath())
+                      .setTimestamp(re.getTimestamp()).build().toByteString()
+              ).build());
+          break;
+        case APPEND:
+          Event.AppendEvent re2 = (Event.AppendEvent) e;
+          events.add(InotifyProtos.EventProto.newBuilder()
+              .setType(InotifyProtos.EventType.EVENT_APPEND)
+              .setContents(InotifyProtos.AppendEventProto.newBuilder()
+                  .setPath(re2.getPath())
+                  .setNewBlock(re2.toNewBlock()).build().toByteString())
+              .build());
+          break;
+        case UNLINK:
+          Event.UnlinkEvent ue = (Event.UnlinkEvent) e;
+          events.add(InotifyProtos.EventProto.newBuilder()
+              .setType(InotifyProtos.EventType.EVENT_UNLINK)
+              .setContents(
+                  InotifyProtos.UnlinkEventProto.newBuilder()
+                      .setPath(ue.getPath())
+                      .setTimestamp(ue.getTimestamp()).build().toByteString()
+              ).build());
+          break;
+        case TRUNCATE:
+          Event.TruncateEvent te = (Event.TruncateEvent) e;
+          events.add(InotifyProtos.EventProto.newBuilder()
+              .setType(InotifyProtos.EventType.EVENT_TRUNCATE)
+              .setContents(
+                  InotifyProtos.TruncateEventProto.newBuilder()
+                      .setPath(te.getPath())
+                      .setFileSize(te.getFileSize())
+                      .setTimestamp(te.getTimestamp()).build().toByteString()
+              ).build());
+          break;
+        default:
+          throw new RuntimeException("Unexpected inotify event: " + e);
         }
       }
       builder.addBatch(InotifyProtos.EventBatchProto.newBuilder().

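The hunks above are indentation-only; every convert() overload keeps the same protobuf builder-chain shape. For reference, a minimal round-trip sketch of the pattern, assuming the ExtendedBlock overloads exist in both directions as in the surrounding code (demo values are arbitrary):

  import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
  import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
  import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;

  class ExtendedBlockRoundTrip {
    static void demo() {
      // Arbitrary pool id, block id, length, and generation stamp.
      ExtendedBlock b = new ExtendedBlock("BP-1", 1073741825L, 134217728L, 1001L);
      ExtendedBlockProto proto = PBHelperClient.convert(b); // builder chain
      ExtendedBlock back = PBHelperClient.convert(proto);   // reverse direction
      assert b.equals(back);
    }
  }
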
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
index e8d7439..4544b0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
@@ -34,7 +34,8 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdenti
 @InterfaceAudience.Private
 public class DelegationTokenIdentifier
     extends AbstractDelegationTokenIdentifier {
-  public static final Text HDFS_DELEGATION_KIND = new Text("HDFS_DELEGATION_TOKEN");
+  public static final Text HDFS_DELEGATION_KIND =
+      new Text("HDFS_DELEGATION_TOKEN");
 
   /**
    * Create an empty delegation token identifier for reading into.

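HDFS_DELEGATION_KIND is the kind stamped on every HDFS delegation token, and token selectors match on it. A hedged sketch of such a check (the helper class is hypothetical):

  import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
  import org.apache.hadoop.security.token.Token;

  class KindCheck {
    // Kind comparison is how selectors pick HDFS tokens out of a credential set.
    static boolean isHdfsDelegationToken(Token<?> token) {
      return DelegationTokenIdentifier.HDFS_DELEGATION_KIND
          .equals(token.getKind());
    }
  }
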
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
index 16f73b4..35dfed0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
@@ -39,7 +39,7 @@ public class DelegationTokenSelector
 
   /**
    * Select the delegation token for hdfs.  The port will be rewritten to
-   * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port. 
+   * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port.
    * This method should only be called by non-hdfs filesystems that do not
    * use the rpc port to acquire tokens.  Ex. webhdfs, hftp 
    * @param nnUri of the remote namenode
@@ -56,15 +56,15 @@ public class DelegationTokenSelector
     // and correctly determine the value
     Text serviceName = SecurityUtil.buildTokenService(nnUri);
     final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);
-    
+
     int nnRpcPort = HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
     if (nnServiceName != null) {
-      nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort(); 
+      nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort();
     }
     // use original hostname from the uri to avoid unintentional host resolving
     serviceName = SecurityUtil.buildTokenService(
                NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort));
-    
+
     return selectToken(serviceName, tokens);
   }
 

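Per the javadoc above, non-RPC filesystems (webhdfs, hftp) hand the selector a namenode URI and let it rewrite the port from hdfs.service.host_<service>, falling back to the default NN RPC port, before matching tokens. A minimal caller sketch, assuming the three-argument selectToken shown in this hunk:

  import java.net.URI;
  import java.util.Collection;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
  import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
  import org.apache.hadoop.security.token.Token;

  class SelectorDemo {
    static Token<DelegationTokenIdentifier> pick(URI nnUri,
        Collection<Token<?>> credentials, Configuration conf) {
      // The selector builds the service name from nnUri, rewrites the port,
      // and matches on token kind and service.
      return new DelegationTokenSelector().selectToken(nnUri, credentials, conf);
    }
  }
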
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
index d298690..eb19492 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
@@ -52,11 +52,11 @@ public class BlockMetadataHeader {
       BlockMetadataHeader.class);
 
   public static final short VERSION = 1;
-  
+
   /**
    * Header includes everything except the checksum(s) themselves.
    * Version is two bytes. Following it is the DataChecksum
-   * that occupies 5 bytes. 
+   * that occupies 5 bytes.
    */
   private final short version;
   private DataChecksum checksum = null;
@@ -66,7 +66,7 @@ public class BlockMetadataHeader {
     this.checksum = checksum;
     this.version = version;
   }
-  
+
   /** Get the version */
   public short getVersion() {
     return version;
@@ -137,13 +137,14 @@ public class BlockMetadataHeader {
    * @return Metadata Header
    * @throws IOException
    */
-  public static BlockMetadataHeader readHeader(DataInputStream in) throws IOException {
+  public static BlockMetadataHeader readHeader(DataInputStream in)
+      throws IOException {
     return readHeader(in.readShort(), in);
   }
-  
+
   /**
    * Reads header at the top of metadata file and returns the header.
-   * 
+   *
    * @return metadata header for the block
    * @throws IOException
    */
@@ -157,39 +158,40 @@ public class BlockMetadataHeader {
       IOUtils.closeStream(in);
     }
   }
-  
+
   /**
    * Read the header at the beginning of the given block meta file.
    * The current file position will be altered by this method.
    * If an error occurs, the file is <em>not</em> closed.
    */
-  public static BlockMetadataHeader readHeader(RandomAccessFile raf) throws IOException {
+  public static BlockMetadataHeader readHeader(RandomAccessFile raf)
+      throws IOException {
     byte[] buf = new byte[getHeaderSize()];
     raf.seek(0);
     raf.readFully(buf, 0, buf.length);
     return readHeader(new DataInputStream(new ByteArrayInputStream(buf)));
   }
-  
+
   // Version is already read.
-  private static BlockMetadataHeader readHeader(short version, DataInputStream in) 
-                                   throws IOException {
+  private static BlockMetadataHeader readHeader(short version,
+      DataInputStream in) throws IOException {
     DataChecksum checksum = DataChecksum.newDataChecksum(in);
     return new BlockMetadataHeader(version, checksum);
   }
-  
+
   /**
    * This writes all the fields till the beginning of checksum.
    * @param out DataOutputStream
    * @throws IOException
    */
   @VisibleForTesting
-  public static void writeHeader(DataOutputStream out, 
-                                  BlockMetadataHeader header) 
+  public static void writeHeader(DataOutputStream out,
+                                  BlockMetadataHeader header)
                                   throws IOException {
     out.writeShort(header.getVersion());
     header.getChecksum().writeHeader(out);
   }
-  
+
   /**
    * Writes all the fields till the beginning of checksum.
    * @throws IOException on error

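For context on the readHeader/writeHeader pair being re-wrapped here: the header is 7 bytes on disk; a two-byte version followed by the 5-byte DataChecksum header (one byte checksum type, four bytes bytesPerChecksum). A hand-rolled reader over that layout, for illustration only (it assumes the layout the javadoc above describes):

  import java.io.ByteArrayInputStream;
  import java.io.DataInputStream;
  import java.io.IOException;

  class MetaHeaderLayout {
    static void dump(byte[] sevenByteHeader) throws IOException {
      DataInputStream in =
          new DataInputStream(new ByteArrayInputStream(sevenByteHeader));
      short version = in.readShort();           // BlockMetadataHeader.VERSION == 1
      int checksumType = in.readUnsignedByte(); // DataChecksum type id
      int bytesPerChecksum = in.readInt();      // bytes covered per checksum
      System.out.printf("version=%d type=%d bytesPerChecksum=%d%n",
          version, checksumType, bytesPerChecksum);
    }
  }
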
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/CachingStrategy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/CachingStrategy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/CachingStrategy.java
index 215df13..a9d4314 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/CachingStrategy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/CachingStrategy.java
@@ -23,7 +23,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 public class CachingStrategy {
   private final Boolean dropBehind; // null = use server defaults
   private final Long readahead; // null = use server defaults
-  
+
   public static CachingStrategy newDefaultStrategy() {
     return new CachingStrategy(null, null);
   }
@@ -64,7 +64,7 @@ public class CachingStrategy {
   public Boolean getDropBehind() {
     return dropBehind;
   }
-  
+
   public Long getReadahead() {
     return readahead;
   }

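As the field comments note, a null dropBehind or readahead means "use the server defaults". A tiny sketch using only the members visible in this diff:

  import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;

  class CachingStrategyDemo {
    static void demo() {
      CachingStrategy defaults = CachingStrategy.newDefaultStrategy();
      // Both null: the DataNode applies its own configured behavior.
      assert defaults.getDropBehind() == null;
      assert defaults.getReadahead() == null;
    }
  }
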
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
index b159d3a..90f257f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
@@ -25,19 +25,21 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 
 /**
  * Exception indicating that DataNode does not have a replica
- * that matches the target block.  
+ * that matches the target block.
  */
 public class ReplicaNotFoundException extends IOException {
   private static final long serialVersionUID = 1L;
-  public final static String NON_RBW_REPLICA = "Cannot recover a non-RBW replica ";
-  public final static String UNFINALIZED_REPLICA = 
-    "Cannot append to an unfinalized replica ";
-  public final static String UNFINALIZED_AND_NONRBW_REPLICA = 
-    "Cannot recover append/close to a replica that's not FINALIZED and not RBW 
";
+  public final static String NON_RBW_REPLICA =
+      "Cannot recover a non-RBW replica ";
+  public final static String UNFINALIZED_REPLICA =
+      "Cannot append to an unfinalized replica ";
+  public final static String UNFINALIZED_AND_NONRBW_REPLICA =
+      "Cannot recover append/close to a replica that's not FINALIZED and not 
RBW"
+          + " ";
   public final static String NON_EXISTENT_REPLICA =
-    "Cannot append to a non-existent replica ";
+      "Cannot append to a non-existent replica ";
   public final static String UNEXPECTED_GS_REPLICA =
-    "Cannot append to a replica with unexpected generation stamp ";
+      "Cannot append to a replica with unexpected generation stamp ";
 
   public ReplicaNotFoundException() {
     super();
@@ -46,7 +48,7 @@ public class ReplicaNotFoundException extends IOException {
   public ReplicaNotFoundException(ExtendedBlock b) {
     super("Replica not found for " + b);
   }
-  
+
   public ReplicaNotFoundException(String msg) {
     super(msg);
   }

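The constants reflowed above are message prefixes; callers append the offending replica or block when throwing. A hypothetical call site:

  import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
  import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;

  class ThrowSite {
    static void checkRbw(boolean isRbw, ExtendedBlock block)
        throws ReplicaNotFoundException {
      if (!isRbw) {
        // Yields e.g. "Cannot recover a non-RBW replica BP-.../blk_..."
        throw new ReplicaNotFoundException(
            ReplicaNotFoundException.NON_RBW_REPLICA + block);
      }
    }
  }
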
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java
index 8080bcf..67c6586 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java
@@ -25,8 +25,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * This exception is thrown when the name node is in safe mode.
- * Client cannot modified namespace until the safe mode is off. 
- * 
+ * Client cannot modify the namespace until safe mode is off.
+ *
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -35,4 +35,4 @@ public class SafeModeException extends IOException {
   public SafeModeException(String msg) {
     super(msg);
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
index 78cd160..e0fdb32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
@@ -20,22 +20,21 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.retry.FailoverProxyProvider;
 
 public abstract class AbstractNNFailoverProxyProvider<T> implements
-   FailoverProxyProvider <T> {
+    FailoverProxyProvider <T> {
 
   private AtomicBoolean fallbackToSimpleAuth;
 
   /**
    * Inquire whether logical HA URI is used for the implementation. If it is
-   * used, a special token handling may be needed to make sure a token acquired 
-   * from a node in the HA pair can be used against the other node. 
+   * used, special token handling may be needed to make sure a token acquired
+   * from a node in the HA pair can be used against the other node.
    *
    * @return true if logical HA URI is used. false, if not used.
    */
-  public abstract boolean useLogicalURI(); 
+  public abstract boolean useLogicalURI();
 
   /**
   * Set for tracking if a secure client falls back to simple auth.  This method

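The useLogicalURI() contract above separates logical HA URIs, where a token acquired from one NameNode must also work against the other, from physical ones. A hypothetical single-address provider, sketched only to show the contract (not the real IPFailoverProxyProvider; assumes the ProxyInfo nested type inherited from FailoverProxyProvider):

  import java.io.IOException;
  import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;

  class SingleAddressProxyProvider<T> extends AbstractNNFailoverProxyProvider<T> {
    private final Class<T> xface;
    private final T proxy;

    SingleAddressProxyProvider(Class<T> xface, T proxy) {
      this.xface = xface;
      this.proxy = proxy;
    }

    @Override
    public Class<T> getInterface() { return xface; }

    @Override
    public ProxyInfo<T> getProxy() { return new ProxyInfo<>(proxy, "nn1"); }

    @Override
    public void performFailover(T currentProxy) { /* single NN: no-op */ }

    @Override
    public void close() throws IOException { /* no resources held */ }

    @Override
    public boolean useLogicalURI() {
      // Physical URI: the token service already matches the NN address,
      // so no rewriting is needed.
      return false;
    }
  }
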
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/WrappedFailoverProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/WrappedFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/WrappedFailoverProxyProvider.java
index 0b387b7..689aebc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/WrappedFailoverProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/WrappedFailoverProxyProvider.java
@@ -30,14 +30,14 @@ import org.apache.hadoop.io.retry.FailoverProxyProvider;
 public class WrappedFailoverProxyProvider<T> extends
     AbstractNNFailoverProxyProvider<T> {
   private final FailoverProxyProvider<T> proxyProvider;
-  
+
   /**
    * Wrap the given instance of an old FailoverProxyProvider.
    */
   public WrappedFailoverProxyProvider(FailoverProxyProvider<T> provider) {
     proxyProvider = provider;
   }
-    
+
   @Override
   public Class<T> getInterface() {
     return proxyProvider.getInterface();
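
Taken together, the two classes above form a simple delegation pattern: the
abstract base adds the useLogicalURI() and fallback-to-simple-auth hooks, and
the wrapper adapts a legacy provider by forwarding every call to it. A
self-contained sketch of that shape, using hypothetical stand-in types rather
than the real Hadoop interfaces:

    // All names below are illustrative stand-ins, not the hadoop-hdfs-client API.
    interface ProxyProvider<T> {
      T getProxy();
      void performFailover(T current);
    }

    abstract class AbstractProvider<T> implements ProxyProvider<T> {
      // Reports whether a logical (nameservice) URI is in use, which decides
      // whether a token from one NameNode must also work against the other.
      public abstract boolean useLogicalURI();
    }

    class WrappedProvider<T> extends AbstractProvider<T> {
      private final ProxyProvider<T> delegate;

      WrappedProvider(ProxyProvider<T> delegate) {
        this.delegate = delegate;
      }

      @Override
      public T getProxy() {
        return delegate.getProxy();
      }

      @Override
      public void performFailover(T current) {
        delegate.performFailover(current);
      }

      // A wrapped legacy provider is assumed not to use a logical URI.
      @Override
      public boolean useLogicalURI() {
        return false;
      }
    }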

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
index 0c8b6c93..770c41b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
@@ -28,22 +28,24 @@ public class DatanodeStorage {
   /** The state of the storage. */
   public enum State {
     NORMAL,
-    
+
     /**
-     * A storage that represents a read-only path to replicas stored on a shared storage device.
-     * Replicas on {@link #READ_ONLY_SHARED} storage are not counted towards live replicas.
-     * 
+     * A storage that represents a read-only path to replicas stored on a shared
+     * storage device. Replicas on {@link #READ_ONLY_SHARED} storage are not
+     * counted towards live replicas.
+     *
      * <p>
-     * In certain implementations, a {@link #READ_ONLY_SHARED} storage may be correlated to 
-     * its {@link #NORMAL} counterpart using the {@link DatanodeStorage#storageID}.  This
-     * property should be used for debugging purposes only.
-     * </p> 
+     * In certain implementations, a {@link #READ_ONLY_SHARED} storage may be
+     * correlated to its {@link #NORMAL} counterpart using the
+     * {@link DatanodeStorage#storageID}.  This property should be used for
+     * debugging purposes only.
+     * </p>
      */
     READ_ONLY_SHARED,
 
-    FAILED;
+    FAILED
   }
-  
+
   private final String storageID;
   private final State state;
   private final StorageType storageType;
@@ -91,10 +93,9 @@ public class DatanodeStorage {
     try {
       // Attempt to parse the UUID.
       if (storageID != null && storageID.indexOf(STORAGE_ID_PREFIX) == 0) {
-        UUID.fromString(storageID.substring(STORAGE_ID_PREFIX.length()));
         return true;
       }
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalArgumentException ignored) {
     }
 
     return false;
@@ -104,7 +105,7 @@ public class DatanodeStorage {
   public String toString() {
     return "DatanodeStorage["+ storageID + "," + storageType + "," + state 
+"]";
   }
-  
+
   @Override
   public boolean equals(Object other){
     if (other == this) {
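
For reference, the UUID parse removed above is what made the catch clause
reachable: the prefix check alone cannot throw. A standalone sketch of the
pre-change validity check; the "DS-" prefix value is an assumption, since this
hunk shows only the STORAGE_ID_PREFIX constant by name:

    import java.util.UUID;

    public class StorageIdCheck {
      private static final String STORAGE_ID_PREFIX = "DS-";  // assumed value

      static boolean isValidStorageId(String storageID) {
        try {
          if (storageID != null && storageID.indexOf(STORAGE_ID_PREFIX) == 0) {
            // Throws IllegalArgumentException when the suffix is not a UUID.
            UUID.fromString(storageID.substring(STORAGE_ID_PREFIX.length()));
            return true;
          }
        } catch (IllegalArgumentException ignored) {
          // Malformed UUID: fall through and report an invalid storage ID.
        }
        return false;
      }

      public static void main(String[] args) {
        System.out.println(isValidStorageId("DS-" + UUID.randomUUID()));  // true
        System.out.println(isValidStorageId("DS-not-a-uuid"));            // false
      }
    }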

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
index 6a956a0..5bf2a72 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
@@ -39,4 +39,4 @@ public class DatanodeStorageReport {
   public StorageReport[] getStorageReports() {
     return storageReports;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java
index 5fd5733..3fbc424 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java
@@ -29,7 +29,7 @@ public class StorageReport {
   private final long blockPoolUsed;
 
   public static final StorageReport[] EMPTY_ARRAY = {};
-  
+
   public StorageReport(DatanodeStorage storage, boolean failed,
       long capacity, long dfsUsed, long remaining, long bpUsed) {
     this.storage = storage;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ClientMmap.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ClientMmap.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ClientMmap.java
index 2d871fc..f07a9f4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ClientMmap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ClientMmap.java
@@ -31,12 +31,12 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Private
 public class ClientMmap implements Closeable {
   static final Logger LOG = LoggerFactory.getLogger(ClientMmap.class);
-  
+
   /**
    * A reference to the block replica which this mmap relates to.
    */
   private ShortCircuitReplica replica;
-  
+
   /**
    * The java ByteBuffer object.
    */
@@ -72,4 +72,4 @@ public class ClientMmap implements Closeable {
   public MappedByteBuffer getMappedByteBuffer() {
     return map;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
index 4ffc108..c421fe8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
@@ -51,14 +51,15 @@ import org.slf4j.LoggerFactory;
 
 /**
  * Manages short-circuit memory segments for an HDFS client.
- * 
- * Clients are responsible for requesting and releasing shared memory segments used
- * for communicating with the DataNode. The client will try to allocate new slots
- * in the set of existing segments, falling back to getting a new segment from the
- * DataNode via {@link DataTransferProtocol#requestShortCircuitFds}.
- * 
- * The counterpart to this class on the DataNode is {@link ShortCircuitRegistry}.
- * See {@link ShortCircuitRegistry} for more information on the communication protocol.
+ *
+ * Clients are responsible for requesting and releasing shared memory segments
+ * used for communicating with the DataNode. The client will try to allocate new
+ * slots in the set of existing segments, falling back to getting a new segment
+ * from the DataNode via {@link DataTransferProtocol#requestShortCircuitFds}.
+ *
+ * The counterpart to this class on the DataNode is
+ * {@link ShortCircuitRegistry}. See {@link ShortCircuitRegistry} for more
+ * information on the communication protocol.
  */
 @InterfaceAudience.Private
 public class DfsClientShmManager implements Closeable {
@@ -79,16 +80,14 @@ public class DfsClientShmManager implements Closeable {
      *
      * Protected by the manager lock.
      */
-    private final TreeMap<ShmId, DfsClientShm> full =
-        new TreeMap<ShmId, DfsClientShm>();
+    private final TreeMap<ShmId, DfsClientShm> full = new TreeMap<>();
 
     /**
      * Shared memory segments which have at least one empty slot.
      *
      * Protected by the manager lock.
      */
-    private final TreeMap<ShmId, DfsClientShm> notFull =
-        new TreeMap<ShmId, DfsClientShm>();
+    private final TreeMap<ShmId, DfsClientShm> notFull = new TreeMap<>();
 
     /**
      * True if this datanode doesn't support short-circuit shared memory
@@ -157,11 +156,11 @@ public class DfsClientShmManager implements Closeable {
      */
     private DfsClientShm requestNewShm(String clientName, DomainPeer peer)
         throws IOException {
-      final DataOutputStream out = 
+      final DataOutputStream out =
           new DataOutputStream(
               new BufferedOutputStream(peer.getOutputStream()));
       new Sender(out).requestShortCircuitShm(clientName);
-      ShortCircuitShmResponseProto resp = 
+      ShortCircuitShmResponseProto resp =
           ShortCircuitShmResponseProto.parseFrom(
             PBHelperClient.vintPrefixed(peer.getInputStream()));
       String error = resp.hasError() ? resp.getError() : "(unknown)";
@@ -179,7 +178,7 @@ public class DfsClientShmManager implements Closeable {
               "pass a file descriptor for the shared memory segment.");
         }
         try {
-          DfsClientShm shm = 
+          DfsClientShm shm =
               new DfsClientShm(PBHelperClient.convert(resp.getId()),
                   fis[0], this, peer);
           LOG.trace("{}: createNewShm: created {}", this, shm);
@@ -278,7 +277,7 @@ public class DfsClientShmManager implements Closeable {
         }
       }
     }
-    
+
     /**
      * Stop tracking a slot.
      *
@@ -302,7 +301,7 @@ public class DfsClientShmManager implements Closeable {
         full.remove(shmId); // The shm can't be full if we just freed a slot.
         if (shm.isEmpty()) {
           notFull.remove(shmId);
-  
+
           // If the shared memory segment is now empty, we call shutdown(2) on
           // the UNIX domain socket associated with it.  The DomainSocketWatcher,
           // which is watching this socket, will call DfsClientShm#handle,
@@ -327,7 +326,7 @@ public class DfsClientShmManager implements Closeable {
         }
       }
     }
-    
+
     /**
      * Unregister a shared memory segment.
      *
@@ -383,8 +382,8 @@ public class DfsClientShmManager implements Closeable {
    * Information about each Datanode.
    */
   private final HashMap<DatanodeInfo, EndpointShmManager> datanodes =
-      new HashMap<DatanodeInfo, EndpointShmManager>(1);
-  
+      new HashMap<>(1);
+
   /**
    * The DomainSocketWatcher which keeps track of the UNIX domain socket
    * associated with each shared memory segment.
@@ -396,12 +395,12 @@ public class DfsClientShmManager implements Closeable {
    * methods are off-limits unless you release the manager lock first.
    */
   private final DomainSocketWatcher domainSocketWatcher;
-  
+
   DfsClientShmManager(int interruptCheckPeriodMs) throws IOException {
     this.domainSocketWatcher = new DomainSocketWatcher(interruptCheckPeriodMs,
         "client");
   }
-  
+
   public Slot allocSlot(DatanodeInfo datanode, DomainPeer peer,
       MutableBoolean usedPeer, ExtendedBlockId blockId,
       String clientName) throws IOException {
@@ -421,7 +420,7 @@ public class DfsClientShmManager implements Closeable {
       lock.unlock();
     }
   }
-  
+
   public void freeSlot(Slot slot) {
     lock.lock();
     try {
@@ -456,8 +455,7 @@ public class DfsClientShmManager implements Closeable {
   public void visit(Visitor visitor) throws IOException {
     lock.lock();
     try {
-      HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info = 
-          new HashMap<DatanodeInfo, PerDatanodeVisitorInfo>();
+      HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info = new HashMap<>();
       for (Entry<DatanodeInfo, EndpointShmManager> entry :
             datanodes.entrySet()) {
         info.put(entry.getKey(), entry.getValue().getVisitorInfo());
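
Most of this file's changes are mechanical: trailing-whitespace removal and
the Java 7 diamond operator replacing repeated type arguments. For readers
unfamiliar with the diamond form, a minimal illustration with hypothetical
type parameters:

    import java.util.TreeMap;

    public class DiamondExample {
      // Before: type arguments repeated on the right-hand side.
      private final TreeMap<String, Integer> before =
          new TreeMap<String, Integer>();

      // After: the compiler infers the arguments from the declared type.
      private final TreeMap<String, Integer> after = new TreeMap<>();
    }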

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
index 6a7d39d..57baee1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
@@ -82,11 +82,10 @@ public class DomainSocketFactory {
     public PathState getPathState() {
       return state;
     }
-    
+
     @Override
     public String toString() {
-      return new StringBuilder().append("PathInfo{path=").append(path).
-          append(", state=").append(state).append("}").toString();
+      return "PathInfo{path=" + path + ", state=" + state + "}";
     }
   }
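
The toString() rewrite above trades an explicit StringBuilder chain for plain
concatenation; for a single expression javac emits equivalent append-based
code, so the change is purely about readability. A before/after sketch with
assumed sample values:

    public class ToStringStyles {
      private final String path = "/var/run/hdfs.sock";  // assumed sample
      private final String state = "VALID";              // assumed sample

      // Before: explicit builder chain.
      public String builderStyle() {
        return new StringBuilder().append("PathInfo{path=").append(path)
            .append(", state=").append(state).append("}").toString();
      }

      // After: plain concatenation compiles to equivalent code.
      public String concatStyle() {
        return "PathInfo{path=" + path + ", state=" + state + "}";
      }
    }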
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index 07f5064..62ade70 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -107,26 +107,26 @@ public class ShortCircuitCache implements Closeable {
 
         int numDemoted = demoteOldEvictableMmaped(curMs);
         int numPurged = 0;
-        Long evictionTimeNs = Long.valueOf(0);
+        Long evictionTimeNs = (long) 0;
         while (true) {
-          Entry<Long, ShortCircuitReplica> entry = 
+          Entry<Long, ShortCircuitReplica> entry =
               evictable.ceilingEntry(evictionTimeNs);
           if (entry == null) break;
           evictionTimeNs = entry.getKey();
-          long evictionTimeMs = 
+          long evictionTimeMs =
              TimeUnit.MILLISECONDS.convert(evictionTimeNs, TimeUnit.NANOSECONDS);
          if (evictionTimeMs + maxNonMmappedEvictableLifespanMs >= curMs) break;
           ShortCircuitReplica replica = entry.getValue();
           if (LOG.isTraceEnabled()) {
-            LOG.trace("CacheCleaner: purging " + replica + ": " + 
-                  StringUtils.getStackTrace(Thread.currentThread()));
+            LOG.trace("CacheCleaner: purging " + replica + ": " +
+                StringUtils.getStackTrace(Thread.currentThread()));
           }
           purge(replica);
           numPurged++;
         }
 
         LOG.debug("{}: finishing cache cleaner run started at {}. Demoted {} "
-            + "mmapped replicas; purged {} replicas.",
+                + "mmapped replicas; purged {} replicas.",
             this, curMs, numDemoted, numPurged);
       } finally {
         ShortCircuitCache.this.lock.unlock();
@@ -236,26 +236,25 @@ public class ShortCircuitCache implements Closeable {
    * The executor service that runs the cacheCleaner.
    */
   private final ScheduledThreadPoolExecutor cleanerExecutor
-  = new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder().
-          setDaemon(true).setNameFormat("ShortCircuitCache_Cleaner").
-          build());
+      = new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder().
+      setDaemon(true).setNameFormat("ShortCircuitCache_Cleaner").
+      build());
 
   /**
    * The executor service that runs the cacheCleaner.
    */
   private final ScheduledThreadPoolExecutor releaserExecutor
       = new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder().
-          setDaemon(true).setNameFormat("ShortCircuitCache_SlotReleaser").
-          build());
+      setDaemon(true).setNameFormat("ShortCircuitCache_SlotReleaser").
+      build());
 
   /**
    * A map containing all ShortCircuitReplicaInfo objects, organized by Key.
    * ShortCircuitReplicaInfo objects may contain a replica, or an InvalidToken
    * exception.
    */
-  private final HashMap<ExtendedBlockId, Waitable<ShortCircuitReplicaInfo>> 
-      replicaInfoMap = new HashMap<ExtendedBlockId,
-          Waitable<ShortCircuitReplicaInfo>>();
+  private final HashMap<ExtendedBlockId, Waitable<ShortCircuitReplicaInfo>>
+      replicaInfoMap = new HashMap<>();
 
   /**
    * The CacheCleaner.  We don't create this and schedule it until it becomes
@@ -268,8 +267,7 @@ public class ShortCircuitCache implements Closeable {
    *
    * Maps (unique) insertion time in nanoseconds to the element.
    */
-  private final TreeMap<Long, ShortCircuitReplica> evictable =
-      new TreeMap<Long, ShortCircuitReplica>();
+  private final TreeMap<Long, ShortCircuitReplica> evictable = new TreeMap<>();
 
   /**
    * Maximum total size of the cache, including both mmapped and
@@ -288,7 +286,7 @@ public class ShortCircuitCache implements Closeable {
    * Maps (unique) insertion time in nanoseconds to the element.
    */
   private final TreeMap<Long, ShortCircuitReplica> evictableMmapped =
-      new TreeMap<Long, ShortCircuitReplica>();
+      new TreeMap<>();
 
   /**
    * Maximum number of mmaped evictable elements.
@@ -435,13 +433,13 @@ public class ShortCircuitCache implements Closeable {
       if (newRefCount == 0) {
         // Close replica, since there are no remaining references to it.
         Preconditions.checkArgument(replica.purged,
-          "Replica %s reached a refCount of 0 without being purged", replica);
+            "Replica %s reached a refCount of 0 without being purged", 
replica);
         replica.close();
       } else if (newRefCount == 1) {
         Preconditions.checkState(null == replica.getEvictableTimeNs(),
             "Replica %s had a refCount higher than 1, " +
-              "but was still evictable (evictableTimeNs = %d)",
-              replica, replica.getEvictableTimeNs());
+                "but was still evictable (evictableTimeNs = %d)",
+            replica, replica.getEvictableTimeNs());
         if (!replica.purged) {
           // Add the replica to the end of an eviction list.
           // Eviction lists are sorted by time.
@@ -457,7 +455,7 @@ public class ShortCircuitCache implements Closeable {
       } else {
         Preconditions.checkArgument(replica.refCount >= 0,
             "replica's refCount went negative (refCount = %d" +
-            " for %s)", replica.refCount, replica);
+                " for %s)", replica.refCount, replica);
       }
       if (LOG.isTraceEnabled()) {
         LOG.trace(this + ": unref replica " + replica +
@@ -484,14 +482,14 @@ public class ShortCircuitCache implements Closeable {
   private int demoteOldEvictableMmaped(long now) {
     int numDemoted = 0;
     boolean needMoreSpace = false;
-    Long evictionTimeNs = Long.valueOf(0);
+    Long evictionTimeNs = (long) 0;
 
     while (true) {
-      Entry<Long, ShortCircuitReplica> entry = 
+      Entry<Long, ShortCircuitReplica> entry =
           evictableMmapped.ceilingEntry(evictionTimeNs);
       if (entry == null) break;
       evictionTimeNs = entry.getKey();
-      long evictionTimeMs = 
+      long evictionTimeMs =
           TimeUnit.MILLISECONDS.convert(evictionTimeNs, TimeUnit.NANOSECONDS);
       if (evictionTimeMs + maxEvictableMmapedLifespanMs >= now) {
         if (evictableMmapped.size() < maxEvictableMmapedSize) {
@@ -501,7 +499,7 @@ public class ShortCircuitCache implements Closeable {
       }
       ShortCircuitReplica replica = entry.getValue();
       if (LOG.isTraceEnabled()) {
-        String rationale = needMoreSpace ? "because we need more space" : 
+        String rationale = needMoreSpace ? "because we need more space" :
             "because it's too old";
         LOG.trace("demoteOldEvictable: demoting " + replica + ": " +
             rationale + ": " +
@@ -530,13 +528,13 @@ public class ShortCircuitCache implements Closeable {
       }
       ShortCircuitReplica replica;
       if (evictableSize == 0) {
-       replica = evictableMmapped.firstEntry().getValue();
+        replica = evictableMmapped.firstEntry().getValue();
       } else {
-       replica = evictable.firstEntry().getValue();
+        replica = evictable.firstEntry().getValue();
       }
       if (LOG.isTraceEnabled()) {
         LOG.trace(this + ": trimEvictionMaps is purging " + replica +
-          StringUtils.getStackTrace(Thread.currentThread()));
+            StringUtils.getStackTrace(Thread.currentThread()));
       }
       purge(replica);
     }
@@ -677,13 +675,12 @@ public class ShortCircuitCache implements Closeable {
             info = fetch(key, waitable);
           } catch (RetriableException e) {
             LOG.debug("{}: retrying {}", this, e.getMessage());
-            continue;
           }
         }
       } while (false);
       if (info != null) return info;
       // We need to load the replica ourselves.
-      newWaitable = new Waitable<ShortCircuitReplicaInfo>(lock.newCondition());
+      newWaitable = new Waitable<>(lock.newCondition());
       replicaInfoMap.put(key, newWaitable);
     } finally {
       lock.unlock();
@@ -716,7 +713,7 @@ public class ShortCircuitCache implements Closeable {
     }
     if (info.getInvalidTokenException() != null) {
       LOG.info(this + ": could not get " + key + " due to InvalidToken " +
-            "exception.", info.getInvalidTokenException());
+          "exception.", info.getInvalidTokenException());
       return info;
     }
     ShortCircuitReplica replica = info.getReplica();
@@ -762,7 +759,7 @@ public class ShortCircuitCache implements Closeable {
         LOG.trace("{}: successfully loaded {}", this, info.getReplica());
         startCacheCleanerThreadIfNeeded();
         // Note: new ShortCircuitReplicas start with a refCount of 2,
-        // indicating that both this cache and whoever requested the 
+        // indicating that both this cache and whoever requested the
         // creation of the replica hold a reference.  So we don't need
         // to increment the reference count here.
       } else {
@@ -833,7 +830,7 @@ public class ShortCircuitCache implements Closeable {
     lock.lock();
     try {
       if (map == null) {
-        replica.mmapData = Long.valueOf(Time.monotonicNow());
+        replica.mmapData = Time.monotonicNow();
         newCond.signalAll();
         return null;
       } else {
@@ -920,12 +917,10 @@ public class ShortCircuitCache implements Closeable {
   public void accept(CacheVisitor visitor) {
     lock.lock();
     try {
-      Map<ExtendedBlockId, ShortCircuitReplica> replicas =
-          new HashMap<ExtendedBlockId, ShortCircuitReplica>();
-      Map<ExtendedBlockId, InvalidToken> failedLoads =
-          new HashMap<ExtendedBlockId, InvalidToken>();
+      Map<ExtendedBlockId, ShortCircuitReplica> replicas = new HashMap<>();
+      Map<ExtendedBlockId, InvalidToken> failedLoads = new HashMap<>();
       for (Entry<ExtendedBlockId, Waitable<ShortCircuitReplicaInfo>> entry :
-            replicaInfoMap.entrySet()) {
+          replicaInfoMap.entrySet()) {
         Waitable<ShortCircuitReplicaInfo> waitable = entry.getValue();
         if (waitable.hasVal()) {
           if (waitable.getVal().getReplica() != null) {
@@ -939,11 +934,11 @@ public class ShortCircuitCache implements Closeable {
         }
       }
       LOG.debug("visiting {} with outstandingMmapCount={}, replicas={}, "
-          + "failedLoads={}, evictable={}, evictableMmapped={}",
+              + "failedLoads={}, evictable={}, evictableMmapped={}",
           visitor.getClass().getName(), outstandingMmapCount, replicas,
           failedLoads, evictable, evictableMmapped);
       visitor.visit(outstandingMmapCount, replicas, failedLoads,
-            evictable, evictableMmapped);
+          evictable, evictableMmapped);
     } finally {
       lock.unlock();
     }
@@ -961,18 +956,18 @@ public class ShortCircuitCache implements Closeable {
    * @param datanode       The datanode to allocate a shm slot with.
    * @param peer           A peer connected to the datanode.
    * @param usedPeer       Will be set to true if we use up the provided peer.
-   * @param blockId        The block id and block pool id of the block we're 
+   * @param blockId        The block id and block pool id of the block we're
    *                         allocating this slot for.
    * @param clientName     The name of the DFSClient allocating the shared
    *                         memory.
    * @return               Null if short-circuit shared memory is disabled;
    *                         a short-circuit memory slot otherwise.
-   * @throws IOException   An exception if there was an error talking to 
+   * @throws IOException   An exception if there was an error talking to
    *                         the datanode.
    */
   public Slot allocShmSlot(DatanodeInfo datanode,
-        DomainPeer peer, MutableBoolean usedPeer,
-        ExtendedBlockId blockId, String clientName) throws IOException {
+      DomainPeer peer, MutableBoolean usedPeer,
+      ExtendedBlockId blockId, String clientName) throws IOException {
     if (shmManager != null) {
       return shmManager.allocSlot(datanode, peer, usedPeer,
           blockId, clientName);
@@ -985,7 +980,7 @@ public class ShortCircuitCache implements Closeable {
    * Free a slot immediately.
    *
    * ONLY use this if the DataNode is not yet aware of the slot.
-   * 
+   *
    * @param slot           The slot to free.
    */
   public void freeSlot(Slot slot) {
@@ -993,7 +988,7 @@ public class ShortCircuitCache implements Closeable {
     slot.makeInvalid();
     shmManager.freeSlot(slot);
   }
-  
+
   /**
    * Schedule a shared memory slot to be released.
    *
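
The cleaner and demotion loops in this file walk an eviction map in
insertion-time order by repeatedly asking for the smallest key at or above the
last key seen. A self-contained sketch of that TreeMap.ceilingEntry idiom; the
sample data and cutoff are hypothetical:

    import java.util.Map;
    import java.util.TreeMap;

    public class CeilingScan {
      public static void main(String[] args) {
        // Keys model unique insertion times in nanoseconds, as in the cache.
        TreeMap<Long, String> evictable = new TreeMap<>();
        evictable.put(100L, "replica-a");
        evictable.put(250L, "replica-b");
        evictable.put(900L, "replica-c");

        long cutoffNs = 500L;           // hypothetical lifespan cutoff
        Long evictionTimeNs = (long) 0; // same seed as the patched code
        while (true) {
          // Smallest entry whose key >= evictionTimeNs, or null when exhausted.
          Map.Entry<Long, String> entry = evictable.ceilingEntry(evictionTimeNs);
          if (entry == null) break;
          evictionTimeNs = entry.getKey();
          if (evictionTimeNs >= cutoffNs) break; // remaining entries are newer
          System.out.println("purging " + entry.getValue());
          evictable.remove(evictionTimeNs);      // stands in for purge(replica)
        }
      }
    }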

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
index 38cf22b..fd5dbfc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.hdfs.util.IOUtilsClient;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Time;
 
@@ -87,7 +86,7 @@ public class ShortCircuitReplica {
    * If non-null, the shared memory slot associated with this replica.
    */
   private final Slot slot;
-  
+
   /**
    * Current mmap state.
    *
@@ -171,14 +170,14 @@ public class ShortCircuitReplica {
       }
     }
   }
-  
+
   /**
    * Try to add a no-checksum anchor to our shared memory slot.
    *
   * It is only possible to add this anchor when the block is mlocked on the Datanode.
   * The DataNode will not munlock the block until the number of no-checksum anchors
    * for the block reaches zero.
-   * 
+   *
    * This method does not require any synchronization.
    *
    * @return     True if we successfully added a no-checksum anchor.
@@ -233,7 +232,7 @@ public class ShortCircuitReplica {
    */
   void close() {
     String suffix = "";
-    
+
     Preconditions.checkState(refCount == 0,
         "tried to close replica with refCount %d: %s", refCount, this);
     refCount = -1;
@@ -278,7 +277,7 @@ public class ShortCircuitReplica {
   MappedByteBuffer loadMmapInternal() {
     try {
       FileChannel channel = dataStream.getChannel();
-      MappedByteBuffer mmap = channel.map(MapMode.READ_ONLY, 0, 
+      MappedByteBuffer mmap = channel.map(MapMode.READ_ONLY, 0,
           Math.min(Integer.MAX_VALUE, channel.size()));
       LOG.trace("{}: created mmap of size {}", this, channel.size());
       return mmap;
@@ -325,13 +324,10 @@ public class ShortCircuitReplica {
    */
   @Override
   public String toString() {
-    return new StringBuilder().append("ShortCircuitReplica{").
-        append("key=").append(key).
-        append(", metaHeader.version=").append(metaHeader.getVersion()).
-        append(", metaHeader.checksum=").append(metaHeader.getChecksum()).
-        append(", ident=").append("0x").
-          append(Integer.toHexString(System.identityHashCode(this))).
-        append(", creationTimeMs=").append(creationTimeMs).
-        append("}").toString();
+    return "ShortCircuitReplica{" + "key=" + key
+        + ", metaHeader.version=" + metaHeader.getVersion()
+        + ", metaHeader.checksum=" + metaHeader.getChecksum()
+        + ", ident=" + "0x" + 
Integer.toHexString(System.identityHashCode(this))
+        + ", creationTimeMs=" + creationTimeMs + "}";
   }
 }
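
One detail worth noting in loadMmapInternal() above: the mapped length is
capped at Integer.MAX_VALUE because a MappedByteBuffer is indexed by int. A
standalone sketch of the same call; the file path is a placeholder:

    import java.io.FileInputStream;
    import java.nio.MappedByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.channels.FileChannel.MapMode;

    public class MmapSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder path; any readable file works for the demonstration.
        try (FileInputStream in = new FileInputStream("/tmp/blk_1234")) {
          FileChannel channel = in.getChannel();
          // The mapped region cannot exceed Integer.MAX_VALUE bytes, even
          // when the underlying file is larger.
          MappedByteBuffer mmap = channel.map(MapMode.READ_ONLY, 0,
              Math.min(Integer.MAX_VALUE, channel.size()));
          System.out.println("mapped " + mmap.remaining() + " bytes");
        }
      }
    }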

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplicaInfo.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplicaInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplicaInfo.java
index ef0019f..cb466c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplicaInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplicaInfo.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 
 public final class ShortCircuitReplicaInfo {
   private final ShortCircuitReplica replica;
-  private final InvalidToken exc; 
+  private final InvalidToken exc;
 
   public ShortCircuitReplicaInfo() {
     this.replica = null;
@@ -43,9 +43,9 @@ public final class ShortCircuitReplicaInfo {
   }
 
   public InvalidToken getInvalidTokenException() {
-    return exc; 
+    return exc;
   }
-  
+
   public String toString() {
     StringBuilder builder = new StringBuilder();
     String prefix = "";
@@ -56,9 +56,8 @@ public final class ShortCircuitReplicaInfo {
     }
     if (exc != null) {
       builder.append(prefix).append(exc);
-      prefix = ", ";
     }
     builder.append("}");
     return builder.toString();
   }
-}
\ No newline at end of file
+}
