HDFS-8979. Clean up checkstyle warnings in hadoop-hdfs-client module. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1257483e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1257483e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1257483e

Branch: refs/heads/branch-2
Commit: 1257483ebf64653705ba0032cb5fa0bb83aebcaa
Parents: 21b4ba4
Author: Haohui Mai <whe...@apache.org>
Authored: Sat Oct 3 11:06:21 2015 -0700
Committer: Haohui Mai <whe...@apache.org>
Committed: Sat Oct 3 11:37:19 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/CacheFlag.java    |    2 +-
 .../org/apache/hadoop/fs/HdfsBlockLocation.java |    9 +-
 .../main/java/org/apache/hadoop/fs/XAttr.java   |    4 +-
 .../hadoop/hdfs/BlockMissingException.java      |    9 +-
 .../org/apache/hadoop/hdfs/BlockReader.java     |    5 +-
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |   39 +-
 .../apache/hadoop/hdfs/BlockReaderLocal.java    |   68 +-
 .../hadoop/hdfs/BlockReaderLocalLegacy.java     |  117 +-
 .../org/apache/hadoop/hdfs/BlockReaderUtil.java |    2 +-
 .../org/apache/hadoop/hdfs/ClientContext.java   |   12 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 1407 ++++++++----------
 .../hadoop/hdfs/DFSClientFaultInjector.java     |    2 +-
 .../hadoop/hdfs/DFSInotifyEventInputStream.java |   25 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  194 ++-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  147 +-
 .../java/org/apache/hadoop/hdfs/DFSPacket.java  |   12 +-
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |   13 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java    |  113 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |  410 +++--
 .../org/apache/hadoop/hdfs/ExtendedBlockId.java |    5 +-
 .../apache/hadoop/hdfs/ExternalBlockReader.java |    2 +-
 .../apache/hadoop/hdfs/KeyProviderCache.java    |   18 +-
 .../hadoop/hdfs/NameNodeProxiesClient.java      |    4 +-
 .../java/org/apache/hadoop/hdfs/PeerCache.java  |   39 +-
 .../apache/hadoop/hdfs/RemoteBlockReader.java   |  148 +-
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  |  126 +-
 .../apache/hadoop/hdfs/RemotePeerFactory.java   |    2 +-
 .../org/apache/hadoop/hdfs/XAttrHelper.java     |   39 +-
 .../hdfs/client/HdfsClientConfigKeys.java       |   45 +-
 .../hadoop/hdfs/client/HdfsDataInputStream.java |   10 +-
 .../hdfs/client/HdfsDataOutputStream.java       |   31 +-
 .../client/impl/CorruptFileBlockIterator.java   |    6 +-
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |   10 +-
 .../hadoop/hdfs/client/impl/LeaseRenewer.java   |   20 +-
 .../hadoop/hdfs/client/impl/package-info.java   |    2 +-
 .../org/apache/hadoop/hdfs/inotify/Event.java   |   49 +-
 .../apache/hadoop/hdfs/net/BasicInetPeer.java   |    8 +-
 .../org/apache/hadoop/hdfs/net/DomainPeer.java  |    2 +-
 .../apache/hadoop/hdfs/net/EncryptedPeer.java   |    6 +-
 .../org/apache/hadoop/hdfs/net/NioInetPeer.java |    4 +-
 .../java/org/apache/hadoop/hdfs/net/Peer.java   |   36 +-
 .../org/apache/hadoop/hdfs/protocol/Block.java  |   31 +-
 .../hdfs/protocol/BlockLocalPathInfo.java       |    6 +-
 .../hdfs/protocol/BlockStoragePolicy.java       |    8 +-
 .../hdfs/protocol/CacheDirectiveEntry.java      |    2 +-
 .../hdfs/protocol/CacheDirectiveInfo.java       |   21 +-
 .../hdfs/protocol/CacheDirectiveIterator.java   |   11 +-
 .../hdfs/protocol/CacheDirectiveStats.java      |   26 +-
 .../hadoop/hdfs/protocol/CachePoolInfo.java     |   22 +-
 .../hadoop/hdfs/protocol/CachePoolIterator.java |    5 +-
 .../hadoop/hdfs/protocol/CachePoolStats.java    |   14 +-
 .../hdfs/protocol/ClientDatanodeProtocol.java   |   44 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java    |    1 +
 .../hdfs/protocol/DSQuotaExceededException.java |    6 +-
 .../apache/hadoop/hdfs/protocol/DatanodeID.java |   12 +-
 .../hadoop/hdfs/protocol/DatanodeInfo.java      |   77 +-
 .../hdfs/protocol/DatanodeInfoWithStorage.java  |    3 +-
 .../hadoop/hdfs/protocol/DatanodeLocalInfo.java |    8 +-
 .../hdfs/protocol/EncryptionZoneIterator.java   |    7 +-
 .../hadoop/hdfs/protocol/HdfsFileStatus.java    |    3 +-
 .../hdfs/protocol/HdfsLocatedFileStatus.java    |    6 +-
 .../hdfs/protocol/LastBlockWithStatus.java      |    3 +-
 .../hadoop/hdfs/protocol/LocatedBlock.java      |    6 +-
 .../hadoop/hdfs/protocol/LocatedBlocks.java     |   49 +-
 .../hdfs/protocol/NSQuotaExceededException.java |    2 +-
 .../hdfs/protocol/RollingUpgradeInfo.java       |    8 +-
 .../hdfs/protocol/SnapshotDiffReport.java       |   55 +-
 .../protocol/SnapshottableDirectoryStatus.java  |   71 +-
 .../hdfs/protocol/UnresolvedPathException.java  |   16 +-
 .../datatransfer/BlockConstructionStage.java    |    8 +-
 .../datatransfer/DataTransferProtoUtil.java     |   18 +-
 .../datatransfer/DataTransferProtocol.java      |   56 +-
 .../protocol/datatransfer/IOStreamPair.java     |    4 +-
 .../hadoop/hdfs/protocol/datatransfer/Op.java   |    6 +-
 .../protocol/datatransfer/PacketHeader.java     |   42 +-
 .../protocol/datatransfer/PacketReceiver.java   |   42 +-
 .../hdfs/protocol/datatransfer/PipelineAck.java |   12 +-
 .../datatransfer/ReplaceDatanodeOnFailure.java  |   50 +-
 .../hdfs/protocol/datatransfer/Sender.java      |  119 +-
 .../datatransfer/TrustedChannelResolver.java    |    9 +-
 .../datatransfer/sasl/DataTransferSaslUtil.java |  119 +-
 .../sasl/SaslDataTransferClient.java            |  108 +-
 .../datatransfer/sasl/SaslParticipant.java      |   12 +-
 .../SaslResponseWithNegotiatedCipherOption.java |    6 +-
 .../protocolPB/ClientDatanodeProtocolPB.java    |    2 +-
 .../ClientDatanodeProtocolTranslatorPB.java     |   36 +-
 .../protocolPB/ClientNamenodeProtocolPB.java    |    6 +-
 .../ClientNamenodeProtocolTranslatorPB.java     |  321 ++--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  644 ++++----
 .../delegation/DelegationTokenIdentifier.java   |    3 +-
 .../delegation/DelegationTokenSelector.java     |    8 +-
 .../server/datanode/BlockMetadataHeader.java    |   32 +-
 .../hdfs/server/datanode/CachingStrategy.java   |    4 +-
 .../datanode/ReplicaNotFoundException.java      |   20 +-
 .../hdfs/server/namenode/SafeModeException.java |    6 +-
 .../ha/AbstractNNFailoverProxyProvider.java     |    9 +-
 .../ha/WrappedFailoverProxyProvider.java        |    4 +-
 .../hdfs/server/protocol/DatanodeStorage.java   |   27 +-
 .../server/protocol/DatanodeStorageReport.java  |    2 +-
 .../hdfs/server/protocol/StorageReport.java     |    2 +-
 .../hadoop/hdfs/shortcircuit/ClientMmap.java    |    6 +-
 .../hdfs/shortcircuit/DfsClientShmManager.java  |   48 +-
 .../hdfs/shortcircuit/DomainSocketFactory.java  |    5 +-
 .../hdfs/shortcircuit/ShortCircuitCache.java    |   87 +-
 .../hdfs/shortcircuit/ShortCircuitReplica.java  |   24 +-
 .../shortcircuit/ShortCircuitReplicaInfo.java   |    9 +-
 .../hdfs/shortcircuit/ShortCircuitShm.java      |   58 +-
 .../hdfs/util/ByteBufferOutputStream.java       |    2 +-
 .../hadoop/hdfs/util/ExactSizeInputStream.java  |   14 +-
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |    5 +-
 .../apache/hadoop/hdfs/util/package-info.java   |    2 +-
 .../hadoop/hdfs/web/ByteRangeInputStream.java   |   32 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |   65 +-
 .../hadoop/hdfs/web/URLConnectionFactory.java   |   43 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      |  138 +-
 .../hdfs/web/oauth2/AccessTokenProvider.java    |    2 +-
 .../hdfs/web/oauth2/AccessTokenTimer.java       |   14 +-
 .../ConfCredentialBasedAccessTokenProvider.java |    4 +-
 ...onfRefreshTokenBasedAccessTokenProvider.java |   26 +-
 .../CredentialBasedAccessTokenProvider.java     |   20 +-
 .../oauth2/OAuth2ConnectionConfigurator.java    |   24 +-
 .../hadoop/hdfs/web/oauth2/OAuth2Constants.java |    4 +-
 .../apache/hadoop/hdfs/web/oauth2/Utils.java    |    6 +-
 .../hadoop/hdfs/web/oauth2/package-info.java    |    2 +-
 .../hdfs/web/resources/AccessTimeParam.java     |    2 +-
 .../hdfs/web/resources/AclPermissionParam.java  |    2 +-
 .../hdfs/web/resources/BlockSizeParam.java      |    2 +-
 .../hdfs/web/resources/BufferSizeParam.java     |    2 +-
 .../hdfs/web/resources/ConcatSourcesParam.java  |    3 +-
 .../hdfs/web/resources/CreateFlagParam.java     |    2 +-
 .../hdfs/web/resources/CreateParentParam.java   |    2 +-
 .../hdfs/web/resources/DelegationParam.java     |    2 +-
 .../hdfs/web/resources/DeleteOpParam.java       |    6 +-
 .../hadoop/hdfs/web/resources/DoAsParam.java    |    2 +-
 .../hadoop/hdfs/web/resources/EnumParam.java    |    3 +-
 .../hadoop/hdfs/web/resources/EnumSetParam.java |    3 +-
 .../web/resources/ExcludeDatanodesParam.java    |    2 +-
 .../hadoop/hdfs/web/resources/GetOpParam.java   |    8 +-
 .../hadoop/hdfs/web/resources/GroupParam.java   |    2 +-
 .../hadoop/hdfs/web/resources/HttpOpParam.java  |   20 +-
 .../hadoop/hdfs/web/resources/LengthParam.java  |    2 +-
 .../hadoop/hdfs/web/resources/LongParam.java    |    3 +-
 .../web/resources/ModificationTimeParam.java    |    2 +-
 .../hdfs/web/resources/NewLengthParam.java      |    2 +-
 .../hadoop/hdfs/web/resources/OffsetParam.java  |    2 +-
 .../hdfs/web/resources/OverwriteParam.java      |    2 +-
 .../hadoop/hdfs/web/resources/OwnerParam.java   |    2 +-
 .../apache/hadoop/hdfs/web/resources/Param.java |   16 +-
 .../hdfs/web/resources/PermissionParam.java     |    2 +-
 .../hadoop/hdfs/web/resources/PostOpParam.java  |    6 +-
 .../hadoop/hdfs/web/resources/PutOpParam.java   |    8 +-
 .../hdfs/web/resources/RecursiveParam.java      |    2 +-
 .../web/resources/RenameOptionSetParam.java     |    4 +-
 .../hadoop/hdfs/web/resources/RenewerParam.java |    2 +-
 .../hdfs/web/resources/ReplicationParam.java    |    2 +-
 .../hadoop/hdfs/web/resources/UserParam.java    |    6 +-
 .../hdfs/web/resources/XAttrEncodingParam.java  |    2 +-
 .../hdfs/web/resources/XAttrNameParam.java      |    6 +-
 .../hdfs/web/resources/XAttrSetFlagParam.java   |    2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |    3 +
 160 files changed, 2934 insertions(+), 3307 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/CacheFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/CacheFlag.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/CacheFlag.java
index f76fcaa..0aec5a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/CacheFlag.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/CacheFlag.java
@@ -34,7 +34,7 @@ public enum CacheFlag {
   FORCE((short) 0x01);
   private final short mode;
 
-  private CacheFlag(short mode) {
+  CacheFlag(short mode) {
     this.mode = mode;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
index 0ccacda..eac3f96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.fs;
 
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -33,14 +31,13 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 public class HdfsBlockLocation extends BlockLocation {
 
   private final LocatedBlock block;
-  
-  public HdfsBlockLocation(BlockLocation loc, LocatedBlock block) 
-      throws IOException {
+
+  public HdfsBlockLocation(BlockLocation loc, LocatedBlock block) {
     // Initialize with data from passed in BlockLocation
     super(loc);
     this.block = block;
   }
-  
+
   public LocatedBlock getLocatedBlock() {
     return block;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
index f688c91..ad7b056 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
@@ -57,12 +57,12 @@ import org.apache.hadoop.classification.InterfaceAudience;
 @InterfaceAudience.Private
 public class XAttr {
 
-  public static enum NameSpace {
+  public enum NameSpace {
     USER,
     TRUSTED,
     SECURITY,
     SYSTEM,
-    RAW;
+    RAW
   }
 
   private final NameSpace ns;
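
As an aside, the CacheFlag and XAttr hunks above are the checkstyle RedundantModifier fixes: enum constructors are implicitly private, member enums are implicitly static, and a trailing ';' after the last constant is unnecessary. A minimal sketch of both rules, using a hypothetical Sample class rather than patch code:

  public class Sample {
    // A member enum is implicitly static, so "static enum" would be redundant.
    public enum NameSpace {
      USER,
      TRUSTED    // no ';' needed after the last constant
    }

    public enum Flag {
      FORCE((short) 0x01);
      private final short mode;

      // Enum constructors are implicitly private, so the modifier is redundant.
      Flag(short mode) {
        this.mode = mode;
      }

      public short getMode() {
        return mode;
      }
    }
  }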

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockMissingException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockMissingException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockMissingException.java
index 7bba8a4..0fde39a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockMissingException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockMissingException.java
@@ -23,9 +23,9 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
-/** 
-  * This exception is thrown when a read encounters a block that has no locations
-  * associated with it.
+/**
+  * This exception is thrown when a read encounters a block that has no
+  * locations associated with it.
   */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -41,7 +41,8 @@ public class BlockMissingException extends IOException {
    * @param filename name of corrupted file
    * @param description a description of the corruption details
    */
-  public BlockMissingException(String filename, String description, long offset) {
+  public BlockMissingException(String filename, String description,
+      long offset) {
     super(description);
     this.filename = filename;
     this.offset = offset;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
index aa3e8ba..9d40e5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
  */
 @InterfaceAudience.Private
 public interface BlockReader extends ByteBufferReadable {
-  
 
   /* same interface as inputStream java.io.InputStream#read()
    * used by DFSInputStream#read()
@@ -55,7 +54,7 @@ public interface BlockReader extends ByteBufferReadable {
    * network I/O.
    * This may return more than what is actually present in the block.
    */
-  int available() throws IOException;
+  int available();
 
   /**
    * Close the block reader.
@@ -84,7 +83,7 @@ public interface BlockReader extends ByteBufferReadable {
    * @return              true only if this is a local read.
    */
   boolean isLocal();
-  
+
   /**
    * @return              true only if this is a short-circuit read.
    *                      All short-circuit reads are also local.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index f249692..c7e2a7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -72,7 +72,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 
-/** 
+/**
  * Utility class to create BlockReader implementations.
  */
 @InterfaceAudience.Private
@@ -127,7 +127,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
   /**
    * The name of this client.
    */
-  private String clientName; 
+  private String clientName;
 
   /**
    * The DataNode we're talking to.
@@ -170,7 +170,8 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
   private RemotePeerFactory remotePeerFactory;
 
   /**
-   * UserGroupInformation  to use for legacy block reader local objects, if needed.
+   * UserGroupInformation to use for legacy block reader local objects,
+   * if needed.
    */
   private UserGroupInformation userGroupInformation;
 
@@ -313,7 +314,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
    * There are a few caches that are important here.
    *
   * The ShortCircuitCache stores file descriptor objects which have been passed
-   * from the DataNode. 
+   * from the DataNode.
    *
    * The DomainSocketFactory stores information about UNIX domain socket paths
    * that we not been able to use in the past, so that we don't waste time
@@ -426,9 +427,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       return null;
     }
     if (clientContext.getDisableLegacyBlockReaderLocal()) {
-        PerformanceAdvisory.LOG.debug("{}: can't construct " +
-            "BlockReaderLocalLegacy because " +
-            "disableLegacyBlockReaderLocal is set.", this);
+      PerformanceAdvisory.LOG.debug("{}: can't construct " +
+          "BlockReaderLocalLegacy because " +
+          "disableLegacyBlockReaderLocal is set.", this);
       return null;
     }
     IOException ioe;
@@ -470,7 +471,8 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       return null;
     }
     ShortCircuitCache cache = clientContext.getShortCircuitCache();
-    ExtendedBlockId key = new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
+    ExtendedBlockId key = new ExtendedBlockId(block.getBlockId(),
+        block.getBlockPoolId());
     ShortCircuitReplicaInfo info = cache.fetchOrCreate(key, this);
     InvalidToken exc = info.getInvalidTokenException();
     if (exc != null) {
@@ -501,14 +503,15 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
    *
    * @return    Null if we could not communicate with the datanode,
    *            a new ShortCircuitReplicaInfo object otherwise.
-   *            ShortCircuitReplicaInfo objects may contain either an InvalidToken
-   *            exception, or a ShortCircuitReplica object ready to use.
+   *            ShortCircuitReplicaInfo objects may contain either an
+   *            InvalidToken exception, or a ShortCircuitReplica object ready to
+   *            use.
    */
   @Override
   public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
     if (createShortCircuitReplicaInfoCallback != null) {
       ShortCircuitReplicaInfo info =
-        createShortCircuitReplicaInfoCallback.createShortCircuitReplicaInfo();
+          createShortCircuitReplicaInfoCallback.createShortCircuitReplicaInfo();
       if (info != null) return info;
     }
     LOG.trace("{}: trying to create ShortCircuitReplicaInfo.", this);
@@ -548,7 +551,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
           // Handle an I/O error we got when using a newly created socket.
           // We temporarily disable the domain socket path for a few minutes in
           // this case, to prevent wasting more time on it.
-          LOG.warn(this + ": I/O error requesting file descriptors.  " + 
+          LOG.warn(this + ": I/O error requesting file descriptors.  " +
               "Disabling domain socket " + peer.getDomainSocket(), e);
           IOUtilsClient.cleanup(LOG, peer);
           clientContext.getDomainSocketFactory()
@@ -564,11 +567,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
    * Request file descriptors from a DomainPeer.
    *
    * @param peer   The peer to use for communication.
-   * @param slot   If non-null, the shared memory slot to associate with the 
+   * @param slot   If non-null, the shared memory slot to associate with the
    *               new ShortCircuitReplica.
-   * 
+   *
    * @return  A ShortCircuitReplica object if we could communicate with the
-   *          datanode; null, otherwise. 
+   *          datanode; null, otherwise.
   * @throws  IOException If we encountered an I/O exception while communicating
    *          with the datanode.
    */
@@ -682,7 +685,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
         }
         if (curPeer.fromCache) {
           // Handle an I/O error we got when using a cached peer.  These are
-          // considered less serious, because the underlying socket may be stale.
+          // considered less serious because the underlying socket may be stale.
           LOG.debug("Closed potentially stale domain peer {}", peer, ioe);
         } else {
           // Handle an I/O error we got when using a newly created domain peer.
@@ -756,7 +759,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
   public static class BlockReaderPeer {
     final Peer peer;
     final boolean fromCache;
-    
+
     BlockReaderPeer(Peer peer, boolean fromCache) {
       this.peer = peer;
       this.fromCache = fromCache;
@@ -800,7 +803,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
     }
     try {
       Peer peer = remotePeerFactory.newConnectedPeer(inetSocketAddress, token,
-        datanode);
+          datanode);
       LOG.trace("nextTcpPeer: created newConnectedPeer {}", peer);
       return new BlockReaderPeer(peer, false);
     } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index 10dc35c..ba06f91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -52,7 +52,7 @@ import org.slf4j.LoggerFactory;
  * <ul>
  * <li>The client performing short circuit reads must be configured at the
  * datanode.</li>
- * <li>The client gets the file descriptors for the metadata file and the data 
+ * <li>The client gets the file descriptors for the metadata file and the data
  * file for the block using
 * {@link org.apache.hadoop.hdfs.server.datanode.DataXceiver#requestShortCircuitFds}.
  * </li>
@@ -155,7 +155,7 @@ class BlockReaderLocal implements BlockReader {
    * The Checksum FileChannel.
    */
   private final FileChannel checksumIn;
-  
+
   /**
    * Checksum type and size.
    */
@@ -170,12 +170,12 @@ class BlockReaderLocal implements BlockReader {
    * Name of the block, for logging purposes.
    */
   private final String filename;
-  
+
   /**
    * Block ID and Block Pool ID.
    */
   private final ExtendedBlock block;
-  
+
   /**
    * Cache of Checksum#bytesPerChecksum.
    */
@@ -204,11 +204,11 @@ class BlockReaderLocal implements BlockReader {
    * size of a single chunk, even if {@link #zeroReadaheadRequested} is true.
    * The reason is because we need to do a certain amount of buffering in order
    * to do checksumming.
-   * 
+   *
    * This determines how many bytes we'll use out of dataBuf and checksumBuf.
    * Why do we allocate buffers, and then (potentially) only use part of them?
    * The rationale is that allocating a lot of buffers of different sizes would
-   * make it very difficult for the DirectBufferPool to re-use buffers. 
+   * make it very difficult for the DirectBufferPool to re-use buffers.
    */
   private final int maxReadaheadLength;
 
@@ -335,9 +335,8 @@ class BlockReaderLocal implements BlockReader {
    */
   private synchronized int fillBuffer(ByteBuffer buf, boolean canSkipChecksum)
       throws IOException {
-    TraceScope scope = tracer.newScope(
-        "BlockReaderLocal#fillBuffer(" + block.getBlockId() + ")");
-    try {
+    try (TraceScope ignored = tracer.newScope(
+        "BlockReaderLocal#fillBuffer(" + block.getBlockId() + ")")) {
       int total = 0;
       long startDataPos = dataPos;
       int startBufPos = buf.position();
@@ -358,7 +357,8 @@ class BlockReaderLocal implements BlockReader {
           buf.limit(buf.position());
           buf.position(startBufPos);
           createChecksumBufIfNeeded();
-          int checksumsNeeded = (total + bytesPerChecksum - 1) / bytesPerChecksum;
+          int checksumsNeeded = (total + bytesPerChecksum - 1) /
+              bytesPerChecksum;
           checksumBuf.clear();
           checksumBuf.limit(checksumsNeeded * checksumSize);
           long checksumPos = BlockMetadataHeader.getHeaderSize()
@@ -367,8 +367,8 @@ class BlockReaderLocal implements BlockReader {
             int nRead = checksumIn.read(checksumBuf, checksumPos);
             if (nRead < 0) {
               throw new IOException("Got unexpected checksum file EOF at " +
-                  checksumPos + ", block file position " + startDataPos + " for " +
-                  "block " + block + " of file " + filename);
+                  checksumPos + ", block file position " + startDataPos +
+                  " for block " + block + " of file " + filename);
             }
             checksumPos += nRead;
           }
@@ -380,24 +380,16 @@ class BlockReaderLocal implements BlockReader {
         }
       }
       return total;
-    } finally {
-      scope.close();
     }
   }
 
   private boolean createNoChecksumContext() {
-    if (verifyChecksum) {
-      if (storageType != null && storageType.isTransient()) {
-        // Checksums are not stored for replicas on transient storage.  We do not
-        // anchor, because we do not intend for client activity to block eviction
-        // from transient storage on the DataNode side.
-        return true;
-      } else {
-        return replica.addNoChecksumAnchor();
-      }
-    } else {
-      return true;
-    }
+    return !verifyChecksum ||
+        // Checksums are not stored for replicas on transient storage.  We do
+        // not anchor, because we do not intend for client activity to block
+        // eviction from transient storage on the DataNode side.
+        (storageType != null && storageType.isTransient()) ||
+        replica.addNoChecksumAnchor();
   }
 
   private void releaseNoChecksumContext() {
@@ -453,14 +445,14 @@ class BlockReaderLocal implements BlockReader {
   /**
    * Fill the data buffer.  If necessary, validate the data against the
    * checksums.
-   * 
+   *
    * We always want the offsets of the data contained in dataBuf to be
    * aligned to the chunk boundary.  If we are validating checksums, we
    * accomplish this by seeking backwards in the file until we're on a
    * chunk boundary.  (This is necessary because we can't checksum a
    * partial chunk.)  If we are not validating checksums, we simply only
    * fill the latter part of dataBuf.
-   * 
+   *
    * @param canSkipChecksum  true if we can skip checksumming.
    * @return                 true if we hit EOF.
    * @throws IOException
@@ -473,11 +465,11 @@ class BlockReaderLocal implements BlockReader {
     dataBuf.limit(maxReadaheadLength);
     if (canSkipChecksum) {
       dataBuf.position(slop);
-      fillBuffer(dataBuf, canSkipChecksum);
+      fillBuffer(dataBuf, true);
     } else {
       dataPos -= slop;
       dataBuf.position(0);
-      fillBuffer(dataBuf, canSkipChecksum);
+      fillBuffer(dataBuf, false);
     }
     dataBuf.limit(dataBuf.position());
     dataBuf.position(Math.min(dataBuf.position(), slop));
@@ -501,7 +493,7 @@ class BlockReaderLocal implements BlockReader {
    * efficiency's sake. As described above, all non-checksum-chunk-aligned
    * reads will be served from the slower read path.
    *
-   * @param buf              The buffer to read into. 
+   * @param buf              The buffer to read into.
    * @param canSkipChecksum  True if we can skip checksums.
    */
   private synchronized int readWithBounceBuffer(ByteBuffer buf,
@@ -621,7 +613,7 @@ class BlockReaderLocal implements BlockReader {
   }
 
   @Override
-  public int available() throws IOException {
+  public int available() {
     // We never do network I/O in BlockReaderLocal.
     return Integer.MAX_VALUE;
   }
@@ -660,8 +652,8 @@ class BlockReaderLocal implements BlockReader {
 
   /**
    * Get or create a memory map for this replica.
-   * 
-   * There are two kinds of ClientMmap objects we could fetch here: one that 
+   *
+   * There are two kinds of ClientMmap objects we could fetch here: one that
    * will always read pre-checksummed data, and one that may read data that
    * hasn't been checksummed.
    *
@@ -671,13 +663,13 @@ class BlockReaderLocal implements BlockReader {
    * If we fetch the latter, we don't bother with anchoring.
    *
    * @param opts     The options to use, such as SKIP_CHECKSUMS.
-   * 
+   *
    * @return         null on failure; the ClientMmap otherwise.
    */
   @Override
   public ClientMmap getClientMmap(EnumSet<ReadOption> opts) {
     boolean anchor = verifyChecksum &&
-        (opts.contains(ReadOption.SKIP_CHECKSUMS) == false);
+        !opts.contains(ReadOption.SKIP_CHECKSUMS);
     if (anchor) {
       if (!createNoChecksumContext()) {
        LOG.trace("can't get an mmap for {} of {} since SKIP_CHECKSUMS was not "
@@ -696,7 +688,7 @@ class BlockReaderLocal implements BlockReader {
     }
     return clientMmap;
   }
-  
+
   @VisibleForTesting
   boolean getVerifyChecksum() {
     return this.verifyChecksum;
@@ -706,7 +698,7 @@ class BlockReaderLocal implements BlockReader {
   int getMaxReadaheadLength() {
     return this.maxReadaheadLength;
   }
-  
+
   /**
    * Make the replica anchorable.  Normally this can only be done by the
    * DataNode.  This method is only for testing.
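
A side note on the fillBuffer change above: because the trace scope is closeable, the explicit try/finally with scope.close() can become a try-with-resources block. A minimal sketch of that idiom, with a hypothetical Scope class standing in for the real tracer API:

  class TraceScopeSketch {
    static class Scope implements AutoCloseable {
      private final String name;
      Scope(String name) { this.name = name; }
      @Override
      public void close() { /* end the timed scope here */ }
    }

    int fillBuffer(java.nio.ByteBuffer buf) {
      // Before: Scope scope = new Scope("fillBuffer"); try { ... } finally { scope.close(); }
      try (Scope ignored = new Scope("fillBuffer")) {
        return buf.remaining();   // stand-in for the actual read-and-checksum loop
      }                           // ignored.close() runs even if an exception is thrown
    }
  }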

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
index 4a1828e..d754e3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
@@ -56,10 +56,10 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * BlockReaderLocalLegacy enables local short circuited reads. If the DFS client is on
- * the same machine as the datanode, then the client can read files directly
- * from the local file system rather than going through the datanode for better
- * performance. <br>
+ * BlockReaderLocalLegacy enables local short circuited reads. If the DFS client
+ * is on the same machine as the datanode, then the client can read files
+ * directly from the local file system rather than going through the datanode
+ * for better performance. <br>
  *
  * This is the legacy implementation based on HDFS-2246, which requires
 * permissions on the datanode to be set so that clients can directly access the
@@ -90,7 +90,8 @@ class BlockReaderLocalLegacy implements BlockReader {
     LocalDatanodeInfo() {
       final int cacheSize = 10000;
       final float hashTableLoadFactor = 0.75f;
-      int hashTableCapacity = (int) Math.ceil(cacheSize / hashTableLoadFactor) + 1;
+      int hashTableCapacity = (int) Math.ceil(cacheSize / hashTableLoadFactor)
+          + 1;
       cache = Collections
           .synchronizedMap(new LinkedHashMap<ExtendedBlock, BlockLocalPathInfo>(
               hashTableCapacity, hashTableLoadFactor, true) {
@@ -123,7 +124,7 @@ class BlockReaderLocalLegacy implements BlockReader {
       }
       return proxy;
     }
-    
+
     private synchronized void resetDatanodeProxy() {
       if (null != proxy) {
         RPC.stopProxy(proxy);
@@ -135,7 +136,8 @@ class BlockReaderLocalLegacy implements BlockReader {
       return cache.get(b);
     }
 
-    private void setBlockLocalPathInfo(ExtendedBlock b, BlockLocalPathInfo info) {
+    private void setBlockLocalPathInfo(ExtendedBlock b,
+        BlockLocalPathInfo info) {
       cache.put(b, info);
     }
 
@@ -143,10 +145,11 @@ class BlockReaderLocalLegacy implements BlockReader {
       cache.remove(b);
     }
   }
-  
+
   // Multiple datanodes could be running on the local machine. Store proxies in
   // a map keyed by the ipc port of the datanode.
-  private static final Map<Integer, LocalDatanodeInfo> localDatanodeInfoMap = new HashMap<Integer, LocalDatanodeInfo>();
+  private static final Map<Integer, LocalDatanodeInfo> localDatanodeInfoMap =
+      new HashMap<>();
 
   private final FileInputStream dataIn; // reader for the data file
   private final FileInputStream checksumIn;   // reader for the checksum file
@@ -158,7 +161,7 @@ class BlockReaderLocalLegacy implements BlockReader {
    * checksum read at construction to position the read cursor correctly.
    */
   private int offsetFromChunkBoundary;
-  
+
   private byte[] skipBuf = null;
 
   /**
@@ -188,7 +191,7 @@ class BlockReaderLocalLegacy implements BlockReader {
   static BlockReaderLocalLegacy newBlockReader(DfsClientConf conf,
       UserGroupInformation userGroupInformation,
       Configuration configuration, String file, ExtendedBlock blk,
-      Token<BlockTokenIdentifier> token, DatanodeInfo node, 
+      Token<BlockTokenIdentifier> token, DatanodeInfo node,
       long startOffset, long length, StorageType storageType,
       Tracer tracer) throws IOException {
     final ShortCircuitConf scConf = conf.getShortCircuitConf();
@@ -234,12 +237,12 @@ class BlockReaderLocalLegacy implements BlockReader {
             new DataInputStream(checksumIn), blk);
         long firstChunkOffset = startOffset
             - (startOffset % checksum.getBytesPerChecksum());
-        localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk, token,
-            startOffset, length, pathinfo, checksum, true, dataIn,
-            firstChunkOffset, checksumIn, tracer);
+        localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk,
+            startOffset, checksum, true, dataIn, firstChunkOffset, checksumIn,
+            tracer);
       } else {
-        localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk, token,
-            startOffset, length, pathinfo, dataIn, tracer);
+        localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk,
+            startOffset, dataIn, tracer);
       }
     } catch (IOException e) {
       // remove from cache
@@ -260,7 +263,7 @@ class BlockReaderLocalLegacy implements BlockReader {
     }
     return localBlockReader;
   }
-  
+
  private static synchronized LocalDatanodeInfo getLocalDatanodeInfo(int port) {
     LocalDatanodeInfo ldInfo = localDatanodeInfoMap.get(port);
     if (ldInfo == null) {
@@ -269,19 +272,20 @@ class BlockReaderLocalLegacy implements BlockReader {
     }
     return ldInfo;
   }
-  
+
   private static BlockLocalPathInfo getBlockPathInfo(UserGroupInformation ugi,
       ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout,
       Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname,
       StorageType storageType) throws IOException {
-    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
-    BlockLocalPathInfo pathinfo = null;
+    LocalDatanodeInfo localDatanodeInfo =
+        getLocalDatanodeInfo(node.getIpcPort());
+    BlockLocalPathInfo pathinfo;
    ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(ugi, node,
         conf, timeout, connectToDnViaHostname);
     try {
       // make RPC to local datanode to find local pathnames of blocks
       pathinfo = proxy.getBlockLocalPathInfo(blk, token);
-      // We cannot cache the path information for a replica on transient storage.
+      // We can't cache the path information for a replica on transient storage.
       // If the replica gets evicted, then it moves to a different path.  Then,
       // our next attempt to read from the cached path would fail to find the
       // file.  Additionally, the failure would cause us to disable legacy
@@ -299,7 +303,7 @@ class BlockReaderLocalLegacy implements BlockReader {
     }
     return pathinfo;
   }
-  
+
   private static int getSlowReadBufferNumChunks(int bufferSizeBytes,
       int bytesPerChecksum) {
     if (bufferSizeBytes < bytesPerChecksum) {
@@ -315,17 +319,15 @@ class BlockReaderLocalLegacy implements BlockReader {
   }
 
   private BlockReaderLocalLegacy(ShortCircuitConf conf, String hdfsfile,
-      ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
-      long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn,
+      ExtendedBlock block, long startOffset, FileInputStream dataIn,
       Tracer tracer) throws IOException {
-    this(conf, hdfsfile, block, token, startOffset, length, pathinfo,
+    this(conf, hdfsfile, block, startOffset,
         DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4), false,
         dataIn, startOffset, null, tracer);
   }
 
   private BlockReaderLocalLegacy(ShortCircuitConf conf, String hdfsfile,
-      ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
-      long length, BlockLocalPathInfo pathinfo, DataChecksum checksum,
+      ExtendedBlock block, long startOffset, DataChecksum checksum,
       boolean verifyChecksum, FileInputStream dataIn, long firstChunkOffset,
       FileInputStream checksumIn, Tracer tracer) throws IOException {
     this.filename = hdfsfile;
@@ -343,17 +345,20 @@ class BlockReaderLocalLegacy implements BlockReader {
 
     final int chunksPerChecksumRead = getSlowReadBufferNumChunks(
         conf.getShortCircuitBufferSize(), bytesPerChecksum);
-    slowReadBuff = bufferPool.getBuffer(bytesPerChecksum * chunksPerChecksumRead);
+    slowReadBuff = bufferPool.getBuffer(
+        bytesPerChecksum * chunksPerChecksumRead);
     checksumBuff = bufferPool.getBuffer(checksumSize * chunksPerChecksumRead);
     // Initially the buffers have nothing to read.
     slowReadBuff.flip();
     checksumBuff.flip();
     boolean success = false;
     try {
-      // Skip both input streams to beginning of the chunk containing startOffset
+      // Skip both input streams to beginning of the chunk containing
+      // startOffset
       IOUtils.skipFully(dataIn, firstChunkOffset);
       if (checksumIn != null) {
-        long checkSumOffset = (firstChunkOffset / bytesPerChecksum) * checksumSize;
+        long checkSumOffset = (firstChunkOffset / bytesPerChecksum) *
+            checksumSize;
         IOUtils.skipFully(checksumIn, checkSumOffset);
       }
       success = true;
@@ -371,9 +376,8 @@ class BlockReaderLocalLegacy implements BlockReader {
    */
   private int fillBuffer(FileInputStream stream, ByteBuffer buf)
       throws IOException {
-    TraceScope scope = tracer.
-        newScope("BlockReaderLocalLegacy#fillBuffer(" + blockId + ")");
-    try {
+    try (TraceScope ignored = tracer.
+        newScope("BlockReaderLocalLegacy#fillBuffer(" + blockId + ")")) {
       int bytesRead = stream.getChannel().read(buf);
       if (bytesRead < 0) {
         //EOF
@@ -388,11 +392,9 @@ class BlockReaderLocalLegacy implements BlockReader {
         bytesRead += n;
       }
       return bytesRead;
-    } finally {
-      scope.close();
     }
   }
-  
+
   /**
   * Utility method used by read(ByteBuffer) to partially copy a ByteBuffer into
    * another.
@@ -426,7 +428,8 @@ class BlockReaderLocalLegacy implements BlockReader {
       if (slowReadBuff.hasRemaining()) {
         // There are remaining bytes from a small read available. This usually
         // means this read is unaligned, which falls back to the slow path.
-        int fromSlowReadBuff = Math.min(buf.remaining(), slowReadBuff.remaining());
+        int fromSlowReadBuff = Math.min(buf.remaining(),
+            slowReadBuff.remaining());
         writeSlice(slowReadBuff, buf, fromSlowReadBuff);
         nRead += fromSlowReadBuff;
       }
@@ -458,8 +461,10 @@ class BlockReaderLocalLegacy implements BlockReader {
 
       // offsetFromChunkBoundary > 0 => unaligned read, use slow path to read
       // until chunk boundary
-      if ((buf.remaining() > 0 && buf.remaining() < bytesPerChecksum) || offsetFromChunkBoundary > 0) {
-        int toRead = Math.min(buf.remaining(), bytesPerChecksum - offsetFromChunkBoundary);
+      if ((buf.remaining() > 0 && buf.remaining() < bytesPerChecksum) ||
+          offsetFromChunkBoundary > 0) {
+        int toRead = Math.min(buf.remaining(),
+            bytesPerChecksum - offsetFromChunkBoundary);
         int readResult = fillSlowReadBuffer(toRead);
         if (readResult == -1) {
           return nRead;
@@ -470,7 +475,8 @@ class BlockReaderLocalLegacy implements BlockReader {
         }
       }
     } else {
-      // Non-checksummed reads are much easier; we can just fill the buffer directly.
+      // Non-checksummed reads are much easier; we can just fill the buffer
+      // directly.
       nRead = doByteBufferRead(buf);
       if (nRead > 0) {
         buf.position(buf.position() + nRead);
@@ -512,7 +518,7 @@ class BlockReaderLocalLegacy implements BlockReader {
     if (verifyChecksum) {
       assert buf.remaining() % bytesPerChecksum == 0;
     }
-    int dataRead = -1;
+    int dataRead;
 
     int oldpos = buf.position();
     // Read as much as we can into the buffer.
@@ -528,9 +534,10 @@ class BlockReaderLocalLegacy implements BlockReader {
       toChecksum.limit(oldpos + dataRead);
 
       checksumBuff.clear();
-      // Equivalent to (int)Math.ceil(toChecksum.remaining() * 1.0 / bytesPerChecksum );
+      // Equivalent to
+      // (int)Math.ceil(toChecksum.remaining() * 1.0 / bytesPerChecksum );
       int numChunks =
-        (toChecksum.remaining() + bytesPerChecksum - 1) / bytesPerChecksum;
+          (toChecksum.remaining() + bytesPerChecksum - 1) / bytesPerChecksum;
       checksumBuff.limit(checksumSize * numChunks);
 
       fillBuffer(checksumIn, checksumBuff);
@@ -571,7 +578,7 @@ class BlockReaderLocalLegacy implements BlockReader {
    * @return the number of bytes available to read, or -1 if EOF.
    */
   private synchronized int fillSlowReadBuffer(int len) throws IOException {
-    int nRead = -1;
+    int nRead;
     if (slowReadBuff.hasRemaining()) {
       // Already got data, good to go.
       nRead = Math.min(len, slowReadBuff.remaining());
@@ -579,7 +586,8 @@ class BlockReaderLocalLegacy implements BlockReader {
       // Round a complete read of len bytes (plus any implicit offset) to the
       // next chunk boundary, since we try and read in multiples of a chunk
       int nextChunk = len + offsetFromChunkBoundary +
-          (bytesPerChecksum - ((len + offsetFromChunkBoundary) % bytesPerChecksum));
+          (bytesPerChecksum -
+              ((len + offsetFromChunkBoundary) % bytesPerChecksum));
       int limit = Math.min(nextChunk, slowReadBuff.capacity());
       assert limit % bytesPerChecksum == 0;
 
@@ -598,7 +606,8 @@ class BlockReaderLocalLegacy implements BlockReader {
   }
 
   @Override
-  public synchronized int read(byte[] buf, int off, int len) throws IOException {
+  public synchronized int read(byte[] buf, int off, int len)
+      throws IOException {
     LOG.trace("read off {} len {}", off, len);
     if (!verifyChecksum) {
       return dataIn.read(buf, off, len);
@@ -625,19 +634,19 @@ class BlockReaderLocalLegacy implements BlockReader {
     if (!verifyChecksum) {
       return dataIn.skip(n);
     }
-  
+
     // caller made sure newPosition is not beyond EOF.
     int remaining = slowReadBuff.remaining();
     int position = slowReadBuff.position();
     int newPosition = position + (int)n;
-  
+
     // if the new offset is already read into dataBuff, just reposition
     if (n <= remaining) {
       assert offsetFromChunkBoundary == 0;
       slowReadBuff.position(newPosition);
       return n;
     }
-  
+
     // for small gap, read through to keep the data/checksum in sync
     if (n - remaining <= bytesPerChecksum) {
       slowReadBuff.position(position + remaining);
@@ -647,11 +656,11 @@ class BlockReaderLocalLegacy implements BlockReader {
       int ret = read(skipBuf, 0, (int)(n - remaining));
       return (remaining + ret);
     }
-  
+
     // optimize for big gap: discard the current buffer, skip to
     // the beginning of the appropriate checksum chunk and then
     // read to the middle of that chunk to be in sync with checksums.
-  
+
     // We can't use this.offsetFromChunkBoundary because we need to know how
     // many bytes of the offset were really read. Calling read(..) with a
     // positive this.offsetFromChunkBoundary causes that many bytes to get
@@ -661,7 +670,7 @@ class BlockReaderLocalLegacy implements BlockReader {
 
     slowReadBuff.position(slowReadBuff.limit());
     checksumBuff.position(checksumBuff.limit());
-  
+
     IOUtils.skipFully(dataIn, toskip);
     long checkSumOffset = (toskip / bytesPerChecksum) * checksumSize;
     IOUtils.skipFully(checksumIn, checkSumOffset);
@@ -708,7 +717,7 @@ class BlockReaderLocalLegacy implements BlockReader {
   }
 
   @Override
-  public int available() throws IOException {
+  public int available() {
     // We never do network I/O in BlockReaderLocalLegacy.
     return Integer.MAX_VALUE;
   }
@@ -717,7 +726,7 @@ class BlockReaderLocalLegacy implements BlockReader {
   public boolean isLocal() {
     return true;
   }
-  
+
   @Override
   public boolean isShortCircuit() {
     return true;
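
For background on the LocalDatanodeInfo cache touched above: sizing the LinkedHashMap at ceil(cacheSize / loadFactor) + 1 keeps it from ever rehashing, and access order plus removeEldestEntry turns it into a bounded LRU map. A minimal, self-contained sketch of that idiom (hypothetical generic cache, not the patch code):

  import java.util.Collections;
  import java.util.LinkedHashMap;
  import java.util.Map;

  class LruCache<K, V> {
    private final Map<K, V> cache;

    LruCache(final int maxEntries) {
      final float loadFactor = 0.75f;
      // Large enough that the map never resizes before maxEntries is reached.
      int capacity = (int) Math.ceil(maxEntries / loadFactor) + 1;
      cache = Collections.synchronizedMap(
          new LinkedHashMap<K, V>(capacity, loadFactor, true /* access order */) {
            @Override
            protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
              return size() > maxEntries;   // evict the least-recently-used entry
            }
          });
    }

    V get(K key) { return cache.get(key); }
    void put(K key, V value) { cache.put(key, value); }
  }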

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
index dbc528e..85f925f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
@@ -54,4 +54,4 @@ class BlockReaderUtil {
       off += ret;
     }
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
index 3836979..047645b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
@@ -35,19 +35,19 @@ import org.slf4j.LoggerFactory;
 
 /**
  * ClientContext contains context information for a client.
- * 
+ *
  * This allows us to share caches such as the socket cache across
  * DFSClient instances.
  */
 @InterfaceAudience.Private
 public class ClientContext {
-  private static final Logger LOG = LoggerFactory.getLogger(ClientContext.class);
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ClientContext.class);
 
   /**
    * Global map of context names to caches contexts.
    */
-  private final static HashMap<String, ClientContext> CACHES =
-      new HashMap<String, ClientContext>();
+  private final static HashMap<String, ClientContext> CACHES = new HashMap<>();
 
   /**
    * Name of context.
@@ -93,7 +93,7 @@ public class ClientContext {
   private volatile boolean disableLegacyBlockReaderLocal = false;
 
   /** Creating byte[] for {@link DFSOutputStream}. */
-  private final ByteArrayManager byteArrayManager;  
+  private final ByteArrayManager byteArrayManager;
 
   /**
   * Whether or not we complained about a DFSClient fetching a CacheContext that
@@ -152,7 +152,7 @@ public class ClientContext {
       if (!printedConfWarning) {
         printedConfWarning = true;
         LOG.warn("Existing client context '" + name + "' does not match " +
-            "requested configuration.  Existing: " + existing + 
+            "requested configuration.  Existing: " + existing +
             ", Requested: " + requested);
       }
     }
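
The ClientContext javadoc above describes a name-keyed registry: every DFSClient that asks for the same context name gets back the same object, which is how caches such as the socket cache end up shared. A minimal sketch of that get-or-create pattern (hypothetical Context class, not the HDFS implementation):

  import java.util.HashMap;

  class Context {
    private static final HashMap<String, Context> CACHES = new HashMap<>();
    private final String name;

    private Context(String name) { this.name = name; }

    // Return the context registered under this name, creating it on first use.
    static synchronized Context get(String name) {
      Context context = CACHES.get(name);
      if (context == null) {
        context = new Context(name);
        CACHES.put(name, context);
      }
      return context;
    }
  }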
