http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
index 2655c40..0bb9955 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
@@ -32,23 +32,23 @@ import com.google.common.collect.Maps;
 
 @InterfaceAudience.Private
 public class XAttrHelper {
-  
+
   /**
    * Build <code>XAttr</code> from xattr name with prefix.
    */
   public static XAttr buildXAttr(String name) {
     return buildXAttr(name, null);
   }
-  
+
   /**
    * Build <code>XAttr</code> from name with prefix and value.
-   * Name can not be null. Value can be null. The name and prefix 
+   * Name can not be null. Value can be null. The name and prefix
    * are validated.
    * Both name and namespace are case sensitive.
    */
   public static XAttr buildXAttr(String name, byte[] value) {
     Preconditions.checkNotNull(name, "XAttr name cannot be null.");
-    
+
     final int prefixIndex = name.indexOf(".");
     if (prefixIndex < 3) {// Prefix length is at least 3.
       throw new HadoopIllegalArgumentException("An XAttr name must be " +
@@ -56,7 +56,7 @@ public class XAttrHelper {
     } else if (prefixIndex == name.length() - 1) {
       throw new HadoopIllegalArgumentException("XAttr name cannot be empty.");
     }
-    
+
     NameSpace ns;
     final String prefix = name.substring(0, prefixIndex);
     if (StringUtils.equalsIgnoreCase(prefix, NameSpace.USER.toString())) {
@@ -77,12 +77,11 @@ public class XAttrHelper {
       throw new HadoopIllegalArgumentException("An XAttr name must be " +
           "prefixed with user/trusted/security/system/raw, followed by a '.'");
     }
-    XAttr xAttr = (new XAttr.Builder()).setNameSpace(ns).setName(name.
+
+    return (new XAttr.Builder()).setNameSpace(ns).setName(name.
         substring(prefixIndex + 1)).setValue(value).build();
-    
-    return xAttr;
   }
-  
+
   /**
    * Build xattr name with prefix as <code>XAttr</code> list.
    */
@@ -90,10 +89,10 @@ public class XAttrHelper {
     XAttr xAttr = buildXAttr(name);
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     xAttrs.add(xAttr);
-    
+
     return xAttrs;
   }
-  
+
   /**
    * Get value of first xattr from <code>XAttr</code> list
    */
@@ -108,7 +107,7 @@ public class XAttrHelper {
     }
     return value;
   }
-  
+
   /**
    * Get first xattr from <code>XAttr</code> list
    */
@@ -116,13 +115,13 @@ public class XAttrHelper {
     if (xAttrs != null && !xAttrs.isEmpty()) {
       return xAttrs.get(0);
     }
-    
+
     return null;
   }
-  
+
   /**
-   * Build xattr map from <code>XAttr</code> list, the key is 
-   * xattr name with prefix, and value is xattr value. 
+   * Build xattr map from <code>XAttr</code> list, the key is
+   * xattr name with prefix, and value is xattr value.
    */
   public static Map<String, byte[]> buildXAttrMap(List<XAttr> xAttrs) {
     if (xAttrs == null) {
@@ -137,10 +136,10 @@ public class XAttrHelper {
       }
       xAttrMap.put(name, value);
     }
-    
+
     return xAttrMap;
   }
-  
+
   /**
    * Get name with prefix from <code>XAttr</code>
    */
@@ -164,11 +163,11 @@ public class XAttrHelper {
       throw new HadoopIllegalArgumentException("XAttr names can not be " +
           "null or empty.");
     }
-    
+
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(names.size());
     for (String name : names) {
       xAttrs.add(buildXAttr(name, null));
     }
     return xAttrs;
-  } 
+  }
 }
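
For reference, a minimal usage sketch of the refactored helper follows. It is illustrative only and not part of the patch: the class name XAttrHelperSketch and the attribute values are made up, while XAttrHelper.buildXAttr and the XAttr getters are the APIs touched in the hunks above.

  import org.apache.hadoop.HadoopIllegalArgumentException;
  import org.apache.hadoop.fs.XAttr;
  import org.apache.hadoop.hdfs.XAttrHelper;

  public class XAttrHelperSketch {
    public static void main(String[] args) {
      // "user.checksum" -> namespace USER, name "checksum"; the prefix is stripped.
      XAttr attr = XAttrHelper.buildXAttr("user.checksum", "abc".getBytes());
      System.out.println(attr.getNameSpace() + " / " + attr.getName());

      // A name without a user/trusted/security/system/raw prefix is rejected.
      try {
        XAttrHelper.buildXAttr("checksum");
      } catch (HadoopIllegalArgumentException e) {
        System.out.println("rejected: " + e.getMessage());
      }
    }
  }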

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 13cd782..0bbdcb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -31,7 +31,8 @@ public interface HdfsClientConfigKeys {
   long    DFS_BLOCK_SIZE_DEFAULT = 128*1024*1024;
   String  DFS_REPLICATION_KEY = "dfs.replication";
   short   DFS_REPLICATION_DEFAULT = 3;
-  String  DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
+  String  DFS_WEBHDFS_USER_PATTERN_KEY =
+      "dfs.webhdfs.user.provider.user.pattern";
   String  DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
   String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
       "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
@@ -125,7 +126,8 @@ public interface HdfsClientConfigKeys {
       "dfs.datanode.hdfs-blocks-metadata.enabled";
   boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;
 
-  String  DFS_DATANODE_KERBEROS_PRINCIPAL_KEY = "dfs.datanode.kerberos.principal";
+  String  DFS_DATANODE_KERBEROS_PRINCIPAL_KEY =
+      "dfs.datanode.kerberos.principal";
   String  DFS_DATANODE_READAHEAD_BYTES_KEY = "dfs.datanode.readahead.bytes";
   long    DFS_DATANODE_READAHEAD_BYTES_DEFAULT = 4 * 1024 * 1024; // 4MB
   String  DFS_ENCRYPTION_KEY_PROVIDER_URI = "dfs.encryption.key.provider.uri";
@@ -142,7 +144,8 @@ public interface HdfsClientConfigKeys {
       "dfs.encrypt.data.transfer.cipher.key.bitlength";
   int    DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT = 128;
 
-  String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
+  String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS =
+      "dfs.trustedchannel.resolver.class";
 
   String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
       PREFIX + "replica.accessor.builder.classes";
@@ -161,13 +164,15 @@ public interface HdfsClientConfigKeys {
     String PREFIX = HdfsClientConfigKeys.PREFIX + "retry.";
 
     String  POLICY_ENABLED_KEY = PREFIX + "policy.enabled";
-    boolean POLICY_ENABLED_DEFAULT = false; 
+    boolean POLICY_ENABLED_DEFAULT = false;
     String  POLICY_SPEC_KEY = PREFIX + "policy.spec";
-    String  POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,... 
+    String  POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,...
 
-    String  TIMES_GET_LAST_BLOCK_LENGTH_KEY = PREFIX + "times.get-last-block-length";
+    String  TIMES_GET_LAST_BLOCK_LENGTH_KEY =
+        PREFIX + "times.get-last-block-length";
     int     TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT = 3;
-    String  INTERVAL_GET_LAST_BLOCK_LENGTH_KEY = PREFIX + "interval-ms.get-last-block-length";
+    String  INTERVAL_GET_LAST_BLOCK_LENGTH_KEY =
+        PREFIX + "interval-ms.get-last-block-length";
     int     INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT = 4000;
 
     String  MAX_ATTEMPTS_KEY = PREFIX + "max.attempts";
@@ -190,17 +195,19 @@ public interface HdfsClientConfigKeys {
     int     SLEEPTIME_MAX_DEFAULT = 15000;
     String  CONNECTION_RETRIES_KEY = PREFIX + "connection.retries";
     int     CONNECTION_RETRIES_DEFAULT = 0;
-    String  CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY = PREFIX + "connection.retries.on.timeouts";
+    String  CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY =
+        PREFIX + "connection.retries.on.timeouts";
     int     CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
   }
-  
+
   /** dfs.client.write configuration properties */
   interface Write {
     String PREFIX = HdfsClientConfigKeys.PREFIX + "write.";
 
     String  MAX_PACKETS_IN_FLIGHT_KEY = PREFIX + "max-packets-in-flight";
     int     MAX_PACKETS_IN_FLIGHT_DEFAULT = 80;
-    String  EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY = PREFIX + "exclude.nodes.cache.expiry.interval.millis";
+    String  EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY =
+        PREFIX + "exclude.nodes.cache.expiry.interval.millis";
     long    EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT = 10*MINUTE;
 
     interface ByteArrayManager {
@@ -212,7 +219,8 @@ public interface HdfsClientConfigKeys {
       int     COUNT_THRESHOLD_DEFAULT = 128;
       String  COUNT_LIMIT_KEY = PREFIX + "count-limit";
       int     COUNT_LIMIT_DEFAULT = 2048;
-      String  COUNT_RESET_TIME_PERIOD_MS_KEY = PREFIX + "count-reset-time-period-ms";
+      String  COUNT_RESET_TIME_PERIOD_MS_KEY =
+          PREFIX + "count-reset-time-period-ms";
       long    COUNT_RESET_TIME_PERIOD_MS_DEFAULT = 10*SECOND;
     }
   }
@@ -223,9 +231,11 @@ public interface HdfsClientConfigKeys {
 
     String  RETRIES_KEY = PREFIX + "retries";
     int     RETRIES_DEFAULT = 3;
-    String  LOCATEFOLLOWINGBLOCK_RETRIES_KEY = PREFIX + "locateFollowingBlock.retries";
+    String  LOCATEFOLLOWINGBLOCK_RETRIES_KEY =
+        PREFIX + "locateFollowingBlock.retries";
     int     LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT = 5;
-    String  LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY = PREFIX + "locateFollowingBlock.initial.delay.ms";
+    String  LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY =
+        PREFIX + "locateFollowingBlock.initial.delay.ms";
     int     LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_DEFAULT = 400;
 
     interface ReplaceDatanodeOnFailure {
@@ -243,8 +253,8 @@ public interface HdfsClientConfigKeys {
   /** dfs.client.read configuration properties */
   interface Read {
     String PREFIX = HdfsClientConfigKeys.PREFIX + "read.";
-    
-    String  PREFETCH_SIZE_KEY = PREFIX + "prefetch.size"; 
+
+    String  PREFETCH_SIZE_KEY = PREFIX + "prefetch.size";
 
     interface ShortCircuit {
       String PREFIX = Read.PREFIX + "shortcircuit.";
@@ -267,7 +277,8 @@ public interface HdfsClientConfigKeys {
   interface ShortCircuit {
     String PREFIX = Read.PREFIX + "short.circuit.";
 
-    String  REPLICA_STALE_THRESHOLD_MS_KEY = PREFIX + "replica.stale.threshold.ms";
+    String  REPLICA_STALE_THRESHOLD_MS_KEY =
+        PREFIX + "replica.stale.threshold.ms";
     long    REPLICA_STALE_THRESHOLD_MS_DEFAULT = 30*MINUTE;
   }
 
@@ -304,7 +315,7 @@ public interface HdfsClientConfigKeys {
     String  RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,...
     String  RETRY_MAX_ATTEMPTS_KEY = PREFIX + "retry.max.attempts";
     int     RETRY_MAX_ATTEMPTS_DEFAULT = 10;
-    
+
     // failover
     String  FAILOVER_MAX_ATTEMPTS_KEY = PREFIX + "failover.max.attempts";
     int     FAILOVER_MAX_ATTEMPTS_DEFAULT =  15;
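
As a side note, these constants are the lookup keys a client passes to a Hadoop Configuration; nothing about their values changes in this patch. A small hedged sketch (the class name is made up; Configuration.getBoolean and the Retry constants are the pieces shown above):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

  public class RetryPolicyConfigSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Falls back to POLICY_ENABLED_DEFAULT (false) when the key is unset.
      boolean retryEnabled = conf.getBoolean(
          HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY,
          HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT);
      System.out.println(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY + " = " + retryEnabled);
    }
  }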

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
index e8ac686..8e95451 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
@@ -38,11 +38,11 @@ import com.google.common.base.Preconditions;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class HdfsDataInputStream extends FSDataInputStream {
-  public HdfsDataInputStream(DFSInputStream in) throws IOException {
+  public HdfsDataInputStream(DFSInputStream in) {
     super(in);
   }
 
-  public HdfsDataInputStream(CryptoInputStream in) throws IOException {
+  public HdfsDataInputStream(CryptoInputStream in) {
     super(in);
     Preconditions.checkArgument(in.getWrappedStream() instanceof DFSInputStream,
         "CryptoInputStream should wrap a DFSInputStream");
@@ -63,7 +63,7 @@ public class HdfsDataInputStream extends FSDataInputStream {
    * @return the underlying output stream
    */
   public InputStream getWrappedStream() {
-      return in;
+    return in;
   }
 
   /**
@@ -90,10 +90,10 @@ public class HdfsDataInputStream extends FSDataInputStream {
   /**
    * Get the visible length of the file. It will include the length of the last
    * block even if that is in UnderConstruction state.
-   * 
+   *
    * @return The visible length of the file.
    */
-  public long getVisibleLength() throws IOException {
+  public long getVisibleLength() {
     return getDFSInputStream().getFileLength();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
index 745ca7e..d733129 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
@@ -41,33 +41,34 @@ public class HdfsDataOutputStream extends FSDataOutputStream {
     super(out, stats, startPosition);
   }
 
-  public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats
-      ) throws IOException {
+  public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats)
+      throws IOException {
     this(out, stats, 0L);
   }
 
-  public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats,
-      long startPosition) throws IOException {
+  public HdfsDataOutputStream(CryptoOutputStream out,
+      FileSystem.Statistics stats, long startPosition) throws IOException {
     super(out, stats, startPosition);
-    Preconditions.checkArgument(out.getWrappedStream() instanceof DFSOutputStream,
+    Preconditions.checkArgument(
+        out.getWrappedStream() instanceof DFSOutputStream,
         "CryptoOutputStream should wrap a DFSOutputStream");
   }
 
-  public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats)
-      throws IOException {
+  public HdfsDataOutputStream(CryptoOutputStream out,
+      FileSystem.Statistics stats) throws IOException {
     this(out, stats, 0L);
   }
 
   /**
    * Get the actual number of replicas of the current block.
-   * 
+   *
    * This can be different from the designated replication factor of the file
   * because the namenode does not maintain replication for the blocks which are
    * currently being written to. Depending on the configuration, the client may
    * continue to write to a block even if a few datanodes in the write pipeline
    * have failed, or the client may add a new datanodes once a datanode has
    * failed.
-   * 
+   *
    * @return the number of valid replicas of the current block
    */
   public synchronized int getCurrentBlockReplication() throws IOException {
@@ -77,10 +78,10 @@ public class HdfsDataOutputStream extends FSDataOutputStream {
     }
     return ((DFSOutputStream) wrappedStream).getCurrentBlockReplication();
   }
-  
+
   /**
    * Sync buffered data to DataNodes (flush to disk devices).
-   * 
+   *
    * @param syncFlags
    *          Indicate the detailed semantic and actions of the hsync.
    * @throws IOException
@@ -89,13 +90,13 @@ public class HdfsDataOutputStream extends FSDataOutputStream {
   public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
     OutputStream wrappedStream = getWrappedStream();
     if (wrappedStream instanceof CryptoOutputStream) {
-      ((CryptoOutputStream) wrappedStream).flush();
+      wrappedStream.flush();
       wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
     }
     ((DFSOutputStream) wrappedStream).hsync(syncFlags);
   }
-  
-  public static enum SyncFlag {
+
+  public enum SyncFlag {
 
     /**
      * When doing sync to DataNodes, also update the metadata (block length) in
@@ -107,6 +108,6 @@ public class HdfsDataOutputStream extends FSDataOutputStream {
      * Sync the data to DataNode, close the current block, and allocate a new
      * block
      */
-    END_BLOCK;
+    END_BLOCK
   }
 }
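
For orientation, the SyncFlag enum cleaned up above is the argument callers pass to hsync. A hedged usage sketch follows (the path and write are made up for illustration; hsync(EnumSet<SyncFlag>) and SyncFlag.UPDATE_LENGTH are the members shown in this file, and the instanceof guard covers the case where the stream is not an HdfsDataOutputStream):

  import java.util.EnumSet;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
  import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

  public class HsyncSketch {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      try (FSDataOutputStream out = fs.create(new Path("/tmp/hsync-demo"))) {
        out.write("hello".getBytes());
        if (out instanceof HdfsDataOutputStream) {
          // Flush buffered data to datanode disks and also update the
          // block length recorded on the namenode.
          ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        }
      }
    }
  }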

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/CorruptFileBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/CorruptFileBlockIterator.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/CorruptFileBlockIterator.java
index 77bed1a..1daf3f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/CorruptFileBlockIterator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/CorruptFileBlockIterator.java
@@ -84,13 +84,13 @@ public class CorruptFileBlockIterator implements RemoteIterator<Path> {
     }
   }
 
-  
+
   @Override
   public boolean hasNext() {
     return nextPath != null;
   }
 
-  
+
   @Override
   public Path next() throws IOException {
     if (!hasNext()) {
@@ -102,4 +102,4 @@ public class CorruptFileBlockIterator implements RemoteIterator<Path> {
 
     return result;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index 7c18e00..15387bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -79,7 +79,6 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Retry;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.ShortCircuit;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Write;
 
-import java.lang.Class;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -254,9 +253,8 @@ public class DfsClientConf {
 
   @SuppressWarnings("unchecked")
   private List<Class<? extends ReplicaAccessorBuilder>>
-      loadReplicaAccessorBuilderClasses(Configuration conf)
-  {
-    String classNames[] = conf.getTrimmedStrings(
+      loadReplicaAccessorBuilderClasses(Configuration conf) {
+    String[] classNames = conf.getTrimmedStrings(
         HdfsClientConfigKeys.REPLICA_ACCESSOR_BUILDER_CLASSES_KEY);
     if (classNames.length == 0) {
       return Collections.emptyList();
@@ -267,8 +265,8 @@ public class DfsClientConf {
     for (String className: classNames) {
       try {
         Class<? extends ReplicaAccessorBuilder> cls =
-          (Class<? extends ReplicaAccessorBuilder>)
-            classLoader.loadClass(className);
+            (Class<? extends ReplicaAccessorBuilder>)
+                classLoader.loadClass(className);
         classes.add(cls);
       } catch (Throwable t) {
         LOG.warn("Unable to load " + className, t);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
index 8457d65..6faf133 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
@@ -42,8 +42,8 @@ import org.slf4j.LoggerFactory;
 
 /**
  * <p>
- * Used by {@link org.apache.hadoop.hdfs.DFSClient} for renewing file-being-written leases
- * on the namenode.
+ * Used by {@link org.apache.hadoop.hdfs.DFSClient} for renewing
+ * file-being-written leases on the namenode.
  * When a file is opened for write (create or append),
  * namenode stores a file lease for recording the identity of the writer.
  * The writer (i.e. the DFSClient) is required to renew the lease periodically.
@@ -57,7 +57,8 @@ import org.slf4j.LoggerFactory;
  * <li>
  * It maintains a map from (namenode, user) pairs to lease renewers.
  * The same {@link LeaseRenewer} instance is used for renewing lease
- * for all the {@link org.apache.hadoop.hdfs.DFSClient} to the same namenode and the same user.
+ * for all the {@link org.apache.hadoop.hdfs.DFSClient} to the same namenode and
+ * the same user.
  * </li>
  * <li>
  * Each renewer maintains a list of {@link org.apache.hadoop.hdfs.DFSClient}.
@@ -80,7 +81,7 @@ public class LeaseRenewer {
 
   /** Get a {@link LeaseRenewer} instance */
   public static LeaseRenewer getInstance(final String authority,
-      final UserGroupInformation ugi, final DFSClient dfsc) throws IOException {
+      final UserGroupInformation ugi, final DFSClient dfsc) {
     final LeaseRenewer r = Factory.INSTANCE.get(authority, ugi);
     r.addClient(dfsc);
     return r;
@@ -136,7 +137,7 @@ public class LeaseRenewer {
     }
 
     /** A map for per user per namenode renewers. */
-    private final Map<Key, LeaseRenewer> renewers = new HashMap<Key, LeaseRenewer>();
+    private final Map<Key, LeaseRenewer> renewers = new HashMap<>();
 
     /** Get a renewer. */
     private synchronized LeaseRenewer get(final String authority,
@@ -189,7 +190,7 @@ public class LeaseRenewer {
   private final Factory.Key factorykey;
 
   /** A list of clients corresponding to this renewer. */
-  private final List<DFSClient> dfsclients = new ArrayList<DFSClient>();
+  private final List<DFSClient> dfsclients = new ArrayList<>();
 
   /**
    * A stringified stack trace of the call stack when the Lease Renewer
@@ -404,7 +405,7 @@ public class LeaseRenewer {
   private void renew() throws IOException {
     final List<DFSClient> copies;
     synchronized(this) {
-      copies = new ArrayList<DFSClient>(dfsclients);
+      copies = new ArrayList<>(dfsclients);
     }
     //sort the client names for finding out repeated names.
     Collections.sort(copies, new Comparator<DFSClient>() {
@@ -414,8 +415,7 @@ public class LeaseRenewer {
       }
     });
     String previousName = "";
-    for(int i = 0; i < copies.size(); i++) {
-      final DFSClient c = copies.get(i);
+    for (final DFSClient c : copies) {
       //skip if current client name is the same as the previous name.
       if (!c.getClientName().equals(previousName)) {
         if (!c.renewLease()) {
@@ -470,7 +470,7 @@ public class LeaseRenewer {
               LOG.debug("Lease renewer daemon for " + clientsString()
                   + " with renew id " + id + " is not current");
             } else {
-               LOG.debug("Lease renewer daemon for " + clientsString()
+              LOG.debug("Lease renewer daemon for " + clientsString()
                   + " with renew id " + id + " expired");
             }
           }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/package-info.java
index 44a8b45..863462c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/package-info.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/package-info.java
@@ -15,4 +15,4 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.client.impl;
\ No newline at end of file
+package org.apache.hadoop.hdfs.client.impl;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
index 6f2b5e2..78a85b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
@@ -34,7 +34,7 @@ import java.util.List;
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public abstract class Event {
-  public static enum EventType {
+  public enum EventType {
     CREATE, CLOSE, APPEND, RENAME, METADATA, UNLINK, TRUNCATE
   }
 
@@ -98,8 +98,8 @@ public abstract class Event {
   @InterfaceAudience.Public
   public static class CreateEvent extends Event {
 
-    public static enum INodeType {
-      FILE, DIRECTORY, SYMLINK;
+    public enum INodeType {
+      FILE, DIRECTORY, SYMLINK
     }
 
     private INodeType iNodeType;
@@ -247,17 +247,21 @@ public abstract class Event {
     @InterfaceStability.Unstable
     public String toString() {
       StringBuilder content = new StringBuilder();
-      content.append("CreateEvent [INodeType=" + iNodeType + ", path=" + path
-          + ", ctime=" + ctime + ", replication=" + replication
-          + ", ownerName=" + ownerName + ", groupName=" + groupName
-          + ", perms=" + perms + ", ");
+      content.append("CreateEvent [INodeType=").append(iNodeType)
+          .append(", path=").append(path)
+          .append(", ctime=").append(ctime)
+          .append(", replication=").append(replication)
+          .append(", ownerName=").append(ownerName)
+          .append(", groupName=").append(groupName)
+          .append(", perms=").append(perms).append(", ");
 
       if (symlinkTarget != null) {
-        content.append("symlinkTarget=" + symlinkTarget + ", ");
+        content.append("symlinkTarget=").append(symlinkTarget).append(", ");
       }
 
-      content.append("overwrite=" + overwrite + ", defaultBlockSize="
-          + defaultBlockSize + "]");
+      content.append("overwrite=").append(overwrite)
+          .append(", defaultBlockSize=").append(defaultBlockSize)
+          .append("]");
       return content.toString();
     }
 
@@ -274,8 +278,8 @@ public abstract class Event {
   @InterfaceAudience.Public
   public static class MetadataUpdateEvent extends Event {
 
-    public static enum MetadataType {
-      TIMES, REPLICATION, OWNER, PERMS, ACLS, XATTRS;
+    public enum MetadataType {
+      TIMES, REPLICATION, OWNER, PERMS, ACLS, XATTRS
     }
 
     private String path;
@@ -434,28 +438,29 @@ public abstract class Event {
     @InterfaceStability.Unstable
     public String toString() {
       StringBuilder content = new StringBuilder();
-      content.append("MetadataUpdateEvent [path=" + path + ", metadataType="
-          + metadataType);
+      content.append("MetadataUpdateEvent [path=").append(path)
+          .append(", metadataType=").append(metadataType);
       switch (metadataType) {
       case TIMES:
-        content.append(", mtime=" + mtime + ", atime=" + atime);
+        content.append(", mtime=").append(mtime)
+            .append(", atime=").append(atime);
         break;
       case REPLICATION:
-        content.append(", replication=" + replication);
+        content.append(", replication=").append(replication);
         break;
       case OWNER:
-        content.append(", ownerName=" + ownerName
-            + ", groupName=" + groupName);
+        content.append(", ownerName=").append(ownerName)
+            .append(", groupName=").append(groupName);
         break;
       case PERMS:
-        content.append(", perms=" + perms);
+        content.append(", perms=").append(perms);
         break;
       case ACLS:
-        content.append(", acls=" + acls);
+        content.append(", acls=").append(acls);
         break;
       case XATTRS:
-        content.append(", xAttrs=" + xAttrs + ", xAttrsRemoved="
-            + xAttrsRemoved);
+        content.append(", xAttrs=").append(xAttrs)
+            .append(", xAttrsRemoved=").append(xAttrsRemoved);
         break;
       default:
         break;
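
The toString changes above replace string concatenation inside append() with chained append calls. A tiny illustrative sketch of why the two forms produce the same output (the class and values below are made up):

  public class AppendChainingSketch {
    public static void main(String[] args) {
      // Chained appends: each append(...) returns the same StringBuilder.
      StringBuilder sb = new StringBuilder();
      sb.append("path=").append("/foo").append(", mtime=").append(1234L);
      // Equivalent output, but allocates a throwaway intermediate String first:
      //   sb.append("path=" + "/foo" + ", mtime=" + 1234L);
      System.out.println(sb);   // prints: path=/foo, mtime=1234
    }
  }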

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java
index 212dbef..f18c5a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java
@@ -68,14 +68,14 @@ public class BasicInetPeer implements Peer {
 
   @Override
   public void setWriteTimeout(int timeoutMs) {
-   /* 
+   /*
     * We can't implement write timeouts. :(
-    * 
+    *
     * Java provides no facility to set a blocking write timeout on a Socket.
     * You can simulate a blocking write with a timeout by using
     * non-blocking I/O.  However, we can't use nio here, because this Socket
     * doesn't have an associated Channel.
-    * 
+    *
     * See http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4031100 for
     * more details.
     */
@@ -100,7 +100,7 @@ public class BasicInetPeer implements Peer {
   public String getLocalAddressString() {
     return socket.getLocalSocketAddress().toString();
   }
-  
+
   @Override
   public InputStream getInputStream() throws IOException {
     return in;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/DomainPeer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/DomainPeer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/DomainPeer.java
index 4792b0e..58c7e61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/DomainPeer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/DomainPeer.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 /**
- * Represents a peer that we communicate with by using blocking I/O 
+ * Represents a peer that we communicate with by using blocking I/O
  * on a UNIX domain socket.
  */
 @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
index da660c7..0481659 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
@@ -38,12 +38,12 @@ public class EncryptedPeer implements Peer {
    * An encrypted InputStream.
    */
   private final InputStream in;
-  
+
   /**
    * An encrypted OutputStream.
    */
   private final OutputStream out;
-  
+
   /**
    * An encrypted ReadableByteChannel.
    */
@@ -53,7 +53,7 @@ public class EncryptedPeer implements Peer {
     this.enclosedPeer = enclosedPeer;
     this.in = ios.in;
     this.out = ios.out;
-    this.channel = ios.in instanceof ReadableByteChannel ? 
+    this.channel = ios.in instanceof ReadableByteChannel ?
         (ReadableByteChannel)ios.in : null;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java
index a12a69b..23a45b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.net.SocketOutputStream;
 import org.apache.hadoop.net.unix.DomainSocket;
 
 /**
- * Represents a peer that we communicate with by using non-blocking I/O 
+ * Represents a peer that we communicate with by using non-blocking I/O
  * on a Socket.
  */
 public class NioInetPeer implements Peer {
@@ -38,7 +38,7 @@ public class NioInetPeer implements Peer {
    * An InputStream which simulates blocking I/O with timeouts using NIO.
    */
   private final SocketInputStream in;
-  
+
   /**
    * An OutputStream which simulates blocking I/O with timeouts using NIO.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java
index 3c38d5f..8fecc6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java
@@ -34,24 +34,24 @@ public interface Peer extends Closeable {
    * @return                The input stream channel associated with this
    *                        peer, or null if it has none.
    */
-  public ReadableByteChannel getInputStreamChannel();
+  ReadableByteChannel getInputStreamChannel();
 
   /**
    * Set the read timeout on this peer.
    *
    * @param timeoutMs       The timeout in milliseconds.
    */
-  public void setReadTimeout(int timeoutMs) throws IOException;
+  void setReadTimeout(int timeoutMs) throws IOException;
 
   /**
    * @return                The receive buffer size.
    */
-  public int getReceiveBufferSize() throws IOException;
+  int getReceiveBufferSize() throws IOException;
 
   /**
    * @return                True if TCP_NODELAY is turned on.
    */
-  public boolean getTcpNoDelay() throws IOException;
+  boolean getTcpNoDelay() throws IOException;
 
   /**
    * Set the write timeout on this peer.
@@ -61,63 +61,63 @@ public interface Peer extends Closeable {
    *
    * @param timeoutMs       The timeout in milliseconds.
    */
-  public void setWriteTimeout(int timeoutMs) throws IOException;
+  void setWriteTimeout(int timeoutMs) throws IOException;
 
   /**
    * @return                true only if the peer is closed.
    */
-  public boolean isClosed();
-  
+  boolean isClosed();
+
   /**
    * Close the peer.
    *
    * It's safe to re-close a Peer that is already closed.
    */
-  public void close() throws IOException;
+  void close() throws IOException;
 
   /**
    * @return               A string representing the remote end of our
    *                       connection to the peer.
    */
-  public String getRemoteAddressString();
+  String getRemoteAddressString();
 
   /**
    * @return               A string representing the local end of our
    *                       connection to the peer.
    */
-  public String getLocalAddressString();
-  
+  String getLocalAddressString();
+
   /**
    * @return               An InputStream associated with the Peer.
    *                       This InputStream will be valid until you close
    *                       this peer with Peer#close.
    */
-  public InputStream getInputStream() throws IOException;
-  
+  InputStream getInputStream() throws IOException;
+
   /**
    * @return               An OutputStream associated with the Peer.
    *                       This OutputStream will be valid until you close
    *                       this peer with Peer#close.
    */
-  public OutputStream getOutputStream() throws IOException;
+  OutputStream getOutputStream() throws IOException;
 
   /**
    * @return               True if the peer resides on the same
    *                       computer as we.
    */
-  public boolean isLocal();
+  boolean isLocal();
 
   /**
    * @return               The DomainSocket associated with the current
    *                       peer, or null if there is none.
    */
-  public DomainSocket getDomainSocket();
-  
+  DomainSocket getDomainSocket();
+
   /**
    * Return true if the channel is secure.
    *
    * @return               True if our channel to this peer is not
    *                       susceptible to man-in-the-middle attacks.
    */
-  public boolean hasSecureChannel();
+  boolean hasSecureChannel();
 }
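
The Peer changes above only drop modifiers the language already implies: members of an interface are implicitly public, and interface methods without a body are implicitly abstract. A minimal sketch of that equivalence (the interface below is hypothetical, not part of the patch):

  import java.io.IOException;

  interface PingablePeerSketch {
    // Same as: public abstract void ping() throws IOException;
    void ping() throws IOException;
    // Fields in an interface are implicitly public static final.
    int DEFAULT_TIMEOUT_MS = 3000;
  }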

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
index 0dac290..710897e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.*;
 
+import javax.annotation.Nonnull;
+
 /**************************************************
  * A Block is a Hadoop FS primitive, identified by a
  * long.
@@ -36,12 +38,10 @@ public class Block implements Writable, Comparable<Block> {
   public static final String BLOCK_FILE_PREFIX = "blk_";
   public static final String METADATA_EXTENSION = ".meta";
   static {                                      // register a ctor
-    WritableFactories.setFactory
-      (Block.class,
-       new WritableFactory() {
-         @Override
-         public Writable newInstance() { return new Block(); }
-       });
+    WritableFactories.setFactory(Block.class, new WritableFactory() {
+      @Override
+      public Writable newInstance() { return new Block(); }
+    });
   }
 
   public static final Pattern blockFilePattern = Pattern
@@ -208,20 +208,14 @@ public class Block implements Writable, Comparable<Block> {
   }
 
   @Override // Comparable
-  public int compareTo(Block b) {
+  public int compareTo(@Nonnull Block b) {
     return blockId < b.blockId ? -1 :
-           blockId > b.blockId ? 1 : 0;
+        blockId > b.blockId ? 1 : 0;
   }
 
   @Override // Object
   public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (!(o instanceof Block)) {
-      return false;
-    }
-    return compareTo((Block)o) == 0;
+    return this == o || o instanceof Block && compareTo((Block) o) == 0;
   }
 
   /**
@@ -230,9 +224,10 @@ public class Block implements Writable, Comparable<Block> {
    */
   public static boolean matchingIdAndGenStamp(Block a, Block b) {
     if (a == b) return true; // same block, or both null
-    if (a == null || b == null) return false; // only one null
-    return a.blockId == b.blockId &&
-           a.generationStamp == b.generationStamp;
+    // only one null
+    return !(a == null || b == null) &&
+        a.blockId == b.blockId &&
+        a.generationStamp == b.generationStamp;
   }
 
   @Override // Object
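
The simplified equals() above relies on && binding tighter than ||, so it reads as "this == o || (o instanceof Block && compareTo((Block) o) == 0)", which matches the original early-return logic. A small hedged sketch (statements assumed to run inside some main method; the Block(long) constructor comes from the existing class):

  Block a = new Block(42L);
  Block b = new Block(42L);
  System.out.println(a.equals(b));                           // true: same blockId
  System.out.println(a.equals("42"));                        // false: not a Block
  System.out.println(Block.matchingIdAndGenStamp(a, null));  // false: exactly one side is null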

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
index 69fa52d..fd4ae45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
@@ -33,7 +33,7 @@ public class BlockLocalPathInfo {
 
   /**
    * Constructs BlockLocalPathInfo.
-   * @param b The block corresponding to this lock path info. 
+   * @param b The block corresponding to this lock path info.
    * @param file Block data file.
    * @param metafile Metadata file for the block.
    */
@@ -48,12 +48,12 @@ public class BlockLocalPathInfo {
    * @return Block data file.
    */
   public String getBlockPath() {return localBlockPath;}
-  
+
   /**
    * @return the Block
    */
   public ExtendedBlock getBlock() { return block;}
-  
+
   /**
    * Get the Block metadata file.
    * @return Block metadata file.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
index 0225009..edb81e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
@@ -79,7 +79,7 @@ public class BlockStoragePolicy implements BlockStoragePolicySpi {
   * @return a list of {@link StorageType}s for storing the replicas of a block.
    */
   public List<StorageType> chooseStorageTypes(final short replication) {
-    final List<StorageType> types = new LinkedList<StorageType>();
+    final List<StorageType> types = new LinkedList<>();
     int i = 0, j = 0;
 
     // Do not return transient storage types. We will not have accurate
@@ -136,11 +136,11 @@ public class BlockStoragePolicy implements BlockStoragePolicySpi {
       final Iterable<StorageType> chosen,
       final EnumSet<StorageType> unavailables,
       final boolean isNewBlock) {
-    final List<StorageType> excess = new LinkedList<StorageType>();
+    final List<StorageType> excess = new LinkedList<>();
     final List<StorageType> storageTypes = chooseStorageTypes(
         replication, chosen, excess);
     final int expectedSize = storageTypes.size() - excess.size();
-    final List<StorageType> removed = new LinkedList<StorageType>();
+    final List<StorageType> removed = new LinkedList<>();
     for(int i = storageTypes.size() - 1; i >= 0; i--) {
       // replace/remove unavailable storage types.
       final StorageType t = storageTypes.get(i);
@@ -195,7 +195,7 @@ public class BlockStoragePolicy implements BlockStoragePolicySpi {
   public List<StorageType> chooseExcess(final short replication,
       final Iterable<StorageType> chosen) {
     final List<StorageType> types = chooseStorageTypes(replication);
-    final List<StorageType> excess = new LinkedList<StorageType>();
+    final List<StorageType> excess = new LinkedList<>();
     diff(types, chosen, excess);
     return excess;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java
index fe3215f..e851735 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java
@@ -42,4 +42,4 @@ public class CacheDirectiveEntry {
   public CacheDirectiveStats getStats() {
     return stats;
   }
-};
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
index 2305c59..d8a7de2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
@@ -46,7 +46,7 @@ public class CacheDirectiveInfo {
 
     /**
      * Builds a new CacheDirectiveInfo populated with the set properties.
-     * 
+     *
      * @return New CacheDirectiveInfo.
      */
     public CacheDirectiveInfo build() {
@@ -73,7 +73,7 @@ public class CacheDirectiveInfo {
 
     /**
      * Sets the id used in this request.
-     * 
+     *
      * @param id The id used in this request.
      * @return This builder, for call chaining.
      */
@@ -84,7 +84,7 @@ public class CacheDirectiveInfo {
 
     /**
      * Sets the path used in this request.
-     * 
+     *
      * @param path The path used in this request.
      * @return This builder, for call chaining.
      */
@@ -95,7 +95,7 @@ public class CacheDirectiveInfo {
 
     /**
      * Sets the replication used in this request.
-     * 
+     *
      * @param replication The replication used in this request.
      * @return This builder, for call chaining.
      */
@@ -106,7 +106,7 @@ public class CacheDirectiveInfo {
 
     /**
      * Sets the pool used in this request.
-     * 
+     *
      * @param pool The pool used in this request.
      * @return This builder, for call chaining.
      */
@@ -119,7 +119,7 @@ public class CacheDirectiveInfo {
      * Sets when the CacheDirective should expire. A
      * {@link CacheDirectiveInfo.Expiration} can specify either an absolute or
      * relative expiration time.
-     * 
+     *
      * @param expiration when this CacheDirective should expire
      * @return This builder, for call chaining
      */
@@ -156,7 +156,7 @@ public class CacheDirectiveInfo {
      * <p>
      * Use {@link Expiration#NEVER} to indicate an Expiration that never
      * expires.
-     * 
+     *
      * @param ms how long until the CacheDirective expires, in milliseconds
      * @return A relative Expiration
      */
@@ -169,7 +169,7 @@ public class CacheDirectiveInfo {
      * <p>
      * Use {@link Expiration#NEVER} to indicate an Expiration that never
      * expires.
-     * 
+     *
      * @param date when the CacheDirective expires
      * @return An absolute Expiration
      */
@@ -182,7 +182,7 @@ public class CacheDirectiveInfo {
      * <p>
      * Use {@link Expiration#NEVER} to indicate an Expiration that never
      * expires.
-     * 
+     *
      * @param ms when the CacheDirective expires, in milliseconds since the Unix
      *          epoch.
      * @return An absolute Expiration
@@ -350,9 +350,8 @@ public class CacheDirectiveInfo {
     }
     if (expiration != null) {
       builder.append(prefix).append("expiration: ").append(expiration);
-      prefix = ", ";
     }
     builder.append("}");
     return builder.toString();
   }
-};
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java
index f144a55..917457f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java
@@ -92,9 +92,8 @@ public class CacheDirectiveIterator
   @Override
   public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey)
       throws IOException {
-    BatchedEntries<CacheDirectiveEntry> entries = null;
-    TraceScope scope = tracer.newScope("listCacheDirectives");
-    try {
+    BatchedEntries<CacheDirectiveEntry> entries;
+    try (TraceScope ignored = tracer.newScope("listCacheDirectives")) {
       entries = namenode.listCacheDirectives(prevKey, filter);
     } catch (IOException e) {
       if (e.getMessage().contains("Filtering by ID is unsupported")) {
@@ -105,9 +104,9 @@ public class CacheDirectiveIterator
         // This is somewhat brittle, since it depends on directives being
         // returned in order of ascending ID.
         entries = namenode.listCacheDirectives(id - 1, filter);
-        for (int i=0; i<entries.size(); i++) {
+        for (int i = 0; i < entries.size(); i++) {
           CacheDirectiveEntry entry = entries.get(i);
-          if (entry.getInfo().getId().equals((Long)id)) {
+          if (entry.getInfo().getId().equals(id)) {
             return new SingleEntry(entry);
           }
         }
@@ -115,8 +114,6 @@ public class CacheDirectiveIterator
             "Did not find requested id " + id);
       }
       throw e;
-    } finally {
-      scope.close();
     }
     Preconditions.checkNotNull(entries);
     return entries;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
index 0fd4ca2..23f94b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java
@@ -35,7 +35,7 @@ public class CacheDirectiveStats {
 
     /**
      * Builds a new CacheDirectiveStats populated with the set properties.
-     * 
+     *
      * @return New CacheDirectiveStats.
      */
     public CacheDirectiveStats build() {
@@ -51,7 +51,7 @@ public class CacheDirectiveStats {
 
     /**
      * Sets the bytes needed by this directive.
-     * 
+     *
      * @param bytesNeeded The bytes needed.
      * @return This builder, for call chaining.
      */
@@ -62,7 +62,7 @@ public class CacheDirectiveStats {
 
     /**
      * Sets the bytes cached by this directive.
-     * 
+     *
      * @param bytesCached The bytes cached.
      * @return This builder, for call chaining.
      */
@@ -83,7 +83,7 @@ public class CacheDirectiveStats {
 
     /**
      * Sets the files cached by this directive.
-     * 
+     *
      * @param filesCached The number of files cached.
      * @return This builder, for call chaining.
      */
@@ -94,7 +94,7 @@ public class CacheDirectiveStats {
 
     /**
      * Sets whether this directive has expired.
-     * 
+     *
      * @param hasExpired if this directive has expired
      * @return This builder, for call chaining.
      */
@@ -156,14 +156,10 @@ public class CacheDirectiveStats {
 
   @Override
   public String toString() {
-    StringBuilder builder = new StringBuilder();
-    builder.append("{");
-    builder.append("bytesNeeded: ").append(bytesNeeded);
-    builder.append(", ").append("bytesCached: ").append(bytesCached);
-    builder.append(", ").append("filesNeeded: ").append(filesNeeded);
-    builder.append(", ").append("filesCached: ").append(filesCached);
-    builder.append(", ").append("hasExpired: ").append(hasExpired);
-    builder.append("}");
-    return builder.toString();
+    return "{" + "bytesNeeded: " + bytesNeeded + ", "
+        + "bytesCached: " + bytesCached + ", "
+        + "filesNeeded: " + filesNeeded + ", "
+        + "filesCached: " + filesCached + ", "
+        + "hasExpired: " + hasExpired + "}";
   }
-};
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
index 41ec2f1..c8a70ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java
@@ -145,7 +145,7 @@ public class CachePoolInfo {
   /**
    * Set the maximum relative expiration of directives of this pool in
    * milliseconds.
-   * 
+   *
    * @param ms in milliseconds
    * @return This builder, for call chaining.
    */
@@ -155,17 +155,15 @@ public class CachePoolInfo {
   }
 
   public String toString() {
-    return new StringBuilder().append("{").
-      append("poolName:").append(poolName).
-      append(", ownerName:").append(ownerName).
-      append(", groupName:").append(groupName).
-      append(", mode:").append((mode == null) ? "null" :
-          String.format("0%03o", mode.toShort())).
-      append(", limit:").append(limit).
-      append(", maxRelativeExpiryMs:").append(maxRelativeExpiryMs).
-      append("}").toString();
-  }
-  
+    return "{" + "poolName:" + poolName
+        + ", ownerName:" + ownerName
+        + ", groupName:" + groupName
+        + ", mode:"
+        + ((mode == null) ? "null" : String.format("0%03o", mode.toShort()))
+        + ", limit:" + limit
+        + ", maxRelativeExpiryMs:" + maxRelativeExpiryMs + "}";
+  }
+
   @Override
   public boolean equals(Object o) {
     if (o == null) { return false; }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolIterator.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolIterator.java
index 5e2bbf2..431b3a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolIterator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolIterator.java
@@ -47,11 +47,8 @@ public class CachePoolIterator
   @Override
   public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
       throws IOException {
-    TraceScope scope = tracer.newScope("listCachePools");
-    try {
+    try (TraceScope ignored = tracer.newScope("listCachePools")) {
       return namenode.listCachePools(prevKey);
-    } finally {
-      scope.close();
     }
   }
 

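The change above replaces an explicit try/finally around the trace scope with try-with-resources: any resource implementing AutoCloseable/Closeable (which TraceScope must, or the new form would not compile) is closed automatically when the block exits, including on an exception. A minimal sketch with a stand-in Closeable instead of the real tracer API:

    import java.io.Closeable;
    import java.io.IOException;

    class ScopeExample {
      // Stand-in for tracer.newScope(); only the close() side effect matters here.
      static Closeable newScope(String name) {
        return () -> System.out.println("closing " + name);
      }

      static String makeRequest() throws IOException {
        try (Closeable ignored = newScope("listCachePools")) {
          return "result";   // close() still runs before the method returns
        }
      }
    }
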
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
index c552652..f00d652 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java
@@ -66,7 +66,7 @@ public class CachePoolStats {
       return new CachePoolStats(bytesNeeded, bytesCached, bytesOverlimit,
           filesNeeded, filesCached);
     }
-  };
+  }
 
   private final long bytesNeeded;
   private final long bytesCached;
@@ -104,12 +104,10 @@ public class CachePoolStats {
   }
 
   public String toString() {
-    return new StringBuilder().append("{").
-      append("bytesNeeded:").append(bytesNeeded).
-      append(", bytesCached:").append(bytesCached).
-      append(", bytesOverlimit:").append(bytesOverlimit).
-      append(", filesNeeded:").append(filesNeeded).
-      append(", filesCached:").append(filesCached).
-      append("}").toString();
+    return "{" + "bytesNeeded:" + bytesNeeded
+        + ", bytesCached:" + bytesCached
+        + ", bytesOverlimit:" + bytesOverlimit
+        + ", filesNeeded:" + filesNeeded
+        + ", filesCached:" + filesCached + "}";
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index dcb7d31..5621de0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -41,34 +41,34 @@ import org.apache.hadoop.security.token.TokenInfo;
 public interface ClientDatanodeProtocol {
   /**
    * Until version 9, this class ClientDatanodeProtocol served as both
-   * the client interface to the DN AND the RPC protocol used to 
+   * the client interface to the DN AND the RPC protocol used to
    * communicate with the NN.
-   * 
-   * This class is used by both the DFSClient and the 
+   *
+   * This class is used by both the DFSClient and the
    * DN server side to insulate from the protocol serialization.
-   * 
-   * If you are adding/changing DN's interface then you need to 
+   *
+   * If you are adding/changing DN's interface then you need to
    * change both this class and ALSO related protocol buffer
    * wire protocol definition in ClientDatanodeProtocol.proto.
-   * 
-   * For more details on protocol buffer wire protocol, please see 
+   *
+   * For more details on protocol buffer wire protocol, please see
    * .../org/apache/hadoop/hdfs/protocolPB/overview.html
-   * 
+   *
    * The log of historical changes can be retrieved from the svn).
    * 9: Added deleteBlockPool method
-   * 
+   *
    * 9 is the last version id when this class was used for protocols
-   *  serialization. DO not update this version any further. 
+   *  serialization. DO not update this version any further.
    */
-  public static final long versionID = 9L;
+  long versionID = 9L;
 
   /** Return the visible length of a replica. */
   long getReplicaVisibleLength(ExtendedBlock b) throws IOException;
-  
+
   /**
    * Refresh the list of federated namenodes from updated configuration
    * Adds new namenodes and stops the deleted namenodes.
-   * 
+   *
    * @throws IOException on error
    **/
   void refreshNamenodes() throws IOException;
@@ -76,19 +76,19 @@ public interface ClientDatanodeProtocol {
   /**
    * Delete the block pool directory. If force is false it is deleted only if
    * it is empty, otherwise it is deleted along with its contents.
-   * 
+   *
    * @param bpid Blockpool id to be deleted.
-   * @param force If false blockpool directory is deleted only if it is empty 
-   *          i.e. if it doesn't contain any block files, otherwise it is 
+   * @param force If false blockpool directory is deleted only if it is empty
+   *          i.e. if it doesn't contain any block files, otherwise it is
    *          deleted along with its contents.
    * @throws IOException
    */
   void deleteBlockPool(String bpid, boolean force) throws IOException;
-  
+
   /**
    * Retrieves the path names of the block file and metadata file stored on the
    * local file system.
-   * 
+   *
    * In order for this method to work, one of the following should be satisfied:
    * <ul>
    * <li>
@@ -98,7 +98,7 @@ public interface ClientDatanodeProtocol {
    * When security is enabled, kerberos authentication must be used to connect
    * to the datanode.</li>
    * </ul>
-   * 
+   *
    * @param block
    *          the specified block on the local datanode
    * @param token
@@ -109,7 +109,7 @@ public interface ClientDatanodeProtocol {
    */
   BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
       Token<BlockTokenIdentifier> token) throws IOException;
-  
+
   /**
    * Retrieves volume location information about a list of blocks on a datanode.
    * This is in the form of an opaque {@link org.apache.hadoop.fs.VolumeId}
@@ -136,9 +136,9 @@ public interface ClientDatanodeProtocol {
    *          down. The work includes advising clients to wait and saving
    *          certain states for quick restart. This should only be used when
    *          the stored data will remain the same during upgrade/restart.
-   * @throws IOException 
+   * @throws IOException
    */
-  void shutdownDatanode(boolean forUpgrade) throws IOException;  
+  void shutdownDatanode(boolean forUpgrade) throws IOException;
 
   /**
    * Obtains datanode info

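Dropping public static final from versionID is purely cosmetic: every field declared in a Java interface is implicitly public, static and final, and every interface method is implicitly public (and abstract if it has no body), so the shorter declarations are equivalent. A short illustration with a hypothetical interface:

    // Hypothetical interface; names are illustrative only.
    interface VersionedProtocol {
      long versionID = 9L;   // same as: public static final long versionID = 9L;

      void refresh();        // same as: public abstract void refresh();
    }
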
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 6c194b4..ce93e63 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.AtMostOnce;
 import org.apache.hadoop.io.retry.Idempotent;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenInfo;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
index 481c130..499d69b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
@@ -42,8 +42,10 @@ public class DSQuotaExceededException extends QuotaExceededException {
     String msg = super.getMessage();
     if (msg == null) {
       return "The DiskSpace quota" + (pathName==null?"": " of " + pathName)
-          + " is exceeded: quota = " + quota + " B = " + long2String(quota, 
"B", 2)
-          + " but diskspace consumed = " + count + " B = " + 
long2String(count, "B", 2);
+          + " is exceeded: quota = " + quota
+          + " B = " + long2String(quota, "B", 2)
+          + " but diskspace consumed = " + count
+          + " B = " + long2String(count, "B", 2);
     } else {
       return msg;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index 6d72285..86782f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -233,14 +233,10 @@ public class DatanodeID implements Comparable<DatanodeID> {
 
   @Override
   public boolean equals(Object to) {
-    if (this == to) {
-      return true;
-    }
-    if (!(to instanceof DatanodeID)) {
-      return false;
-    }
-    return (getXferAddr().equals(((DatanodeID)to).getXferAddr()) &&
-        datanodeUuid.equals(((DatanodeID)to).getDatanodeUuid()));
+    return this == to ||
+        (to instanceof DatanodeID &&
+            getXferAddr().equals(((DatanodeID) to).getXferAddr()) &&
+            datanodeUuid.equals(((DatanodeID) to).getDatanodeUuid()));
   }
 
   @Override

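The rewritten equals() above folds the identity check, the instanceof test and the field comparisons into a single boolean expression; since instanceof is false for null, no separate null check is needed. A self-contained sketch of the same idiom on a hypothetical class:

    // Hypothetical Endpoint class; only the equals() shape mirrors the hunk above.
    class Endpoint {
      private final String addr;

      Endpoint(String addr) { this.addr = addr; }

      @Override
      public boolean equals(Object to) {
        return this == to ||
            (to instanceof Endpoint && addr.equals(((Endpoint) to).addr));
      }

      @Override
      public int hashCode() { return addr.hashCode(); }  // keep equals/hashCode consistent
    }
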
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 0f294c9..96075fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -52,7 +52,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private int xceiverCount;
   private String location = NetworkTopology.DEFAULT_RACK;
   private String softwareVersion;
-  private List<String> dependentHostNames = new LinkedList<String>();
+  private List<String> dependentHostNames = new LinkedList<>();
   private String upgradeDomain;
 
   // Datanode administrative states
@@ -358,18 +358,18 @@ public class DatanodeInfo extends DatanodeID implements Node {
     float cacheRemainingPercent = getCacheRemainingPercent();
     String lookupName = NetUtils.getHostNameOfIP(getName());
 
-    buffer.append("Name: "+ getName());
+    buffer.append("Name: ").append(getName());
     if (lookupName != null) {
-      buffer.append(" (" + lookupName + ")");
+      buffer.append(" (").append(lookupName).append(")");
     }
     buffer.append("\n");
-    buffer.append("Hostname: " + getHostName() + "\n");
+    buffer.append("Hostname: ").append(getHostName()).append("\n");
 
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
-      buffer.append("Rack: "+location+"\n");
+      buffer.append("Rack: ").append(location).append("\n");
     }
     if (upgradeDomain != null) {
-      buffer.append("Upgrade domain: "+ upgradeDomain +"\n");
+      buffer.append("Upgrade domain: ").append(upgradeDomain).append("\n");
     }
     buffer.append("Decommission Status : ");
     if (isDecommissioned()) {
@@ -379,19 +379,30 @@ public class DatanodeInfo extends DatanodeID implements Node {
     } else {
       buffer.append("Normal\n");
     }
-    buffer.append("Configured Capacity: "+c+" 
("+StringUtils.byteDesc(c)+")"+"\n");
-    buffer.append("DFS Used: "+u+" ("+StringUtils.byteDesc(u)+")"+"\n");
-    buffer.append("Non DFS Used: "+nonDFSUsed+" 
("+StringUtils.byteDesc(nonDFSUsed)+")"+"\n");
-    buffer.append("DFS Remaining: " +r+ " ("+StringUtils.byteDesc(r)+")"+"\n");
-    buffer.append("DFS Used%: "+percent2String(usedPercent) + "\n");
-    buffer.append("DFS Remaining%: "+percent2String(remainingPercent) + "\n");
-    buffer.append("Configured Cache Capacity: "+cc+" 
("+StringUtils.byteDesc(cc)+")"+"\n");
-    buffer.append("Cache Used: "+cu+" ("+StringUtils.byteDesc(cu)+")"+"\n");
-    buffer.append("Cache Remaining: " +cr+ " 
("+StringUtils.byteDesc(cr)+")"+"\n");
-    buffer.append("Cache Used%: "+percent2String(cacheUsedPercent) + "\n");
-    buffer.append("Cache Remaining%: "+percent2String(cacheRemainingPercent) + 
"\n");
-    buffer.append("Xceivers: "+getXceiverCount()+"\n");
-    buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
+    buffer.append("Configured Capacity: ").append(c).append(" (")
+        .append(StringUtils.byteDesc(c)).append(")").append("\n");
+    buffer.append("DFS Used: ").append(u).append(" (")
+        .append(StringUtils.byteDesc(u)).append(")").append("\n");
+    buffer.append("Non DFS Used: ").append(nonDFSUsed).append(" (")
+        .append(StringUtils.byteDesc(nonDFSUsed)).append(")").append("\n");
+    buffer.append("DFS Remaining: ").append(r).append(" (")
+        .append(StringUtils.byteDesc(r)).append(")").append("\n");
+    buffer.append("DFS Used%: ").append(percent2String(usedPercent))
+        .append("\n");
+    buffer.append("DFS Remaining%: ").append(percent2String(remainingPercent))
+        .append("\n");
+    buffer.append("Configured Cache Capacity: ").append(cc).append(" (")
+        .append(StringUtils.byteDesc(cc)).append(")").append("\n");
+    buffer.append("Cache Used: ").append(cu).append(" (")
+        .append(StringUtils.byteDesc(cu)).append(")").append("\n");
+    buffer.append("Cache Remaining: ").append(cr).append(" (")
+        .append(StringUtils.byteDesc(cr)).append(")").append("\n");
+    buffer.append("Cache Used%: ").append(percent2String(cacheUsedPercent))
+        .append("\n");
+    buffer.append("Cache Remaining%: ")
+        .append(percent2String(cacheRemainingPercent)).append("\n");
+    buffer.append("Xceivers: ").append(getXceiverCount()).append("\n");
+    buffer.append("Last contact: ").append(new Date(lastUpdate)).append("\n");
     return buffer.toString();
   }
 
@@ -408,10 +419,10 @@ public class DatanodeInfo extends DatanodeID implements Node {
     float cacheUsedPercent = getCacheUsedPercent();
     buffer.append(getName());
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
-      buffer.append(" "+location);
+      buffer.append(" ").append(location);
     }
     if (upgradeDomain != null) {
-      buffer.append(" " + upgradeDomain);
+      buffer.append(" ").append(upgradeDomain);
     }
     if (isDecommissioned()) {
       buffer.append(" DD");
@@ -420,15 +431,21 @@ public class DatanodeInfo extends DatanodeID implements Node {
     } else {
       buffer.append(" IN");
     }
-    buffer.append(" " + c + "(" + StringUtils.byteDesc(c)+")");
-    buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")");
-    buffer.append(" " + percent2String(usedPercent));
-    buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")");
-    buffer.append(" " + cc + "(" + StringUtils.byteDesc(cc)+")");
-    buffer.append(" " + cu + "(" + StringUtils.byteDesc(cu)+")");
-    buffer.append(" " + percent2String(cacheUsedPercent));
-    buffer.append(" " + cr + "(" + StringUtils.byteDesc(cr)+")");
-    buffer.append(" " + new Date(lastUpdate));
+    buffer.append(" ").append(c).append("(").append(StringUtils.byteDesc(c))
+        .append(")");
+    buffer.append(" ").append(u).append("(").append(StringUtils.byteDesc(u))
+        .append(")");
+    buffer.append(" ").append(percent2String(usedPercent));
+    buffer.append(" ").append(r).append("(").append(StringUtils.byteDesc(r))
+        .append(")");
+    buffer.append(" ").append(cc).append("(").append(StringUtils.byteDesc(cc))
+        .append(")");
+    buffer.append(" ").append(cu).append("(").append(StringUtils.byteDesc(cu))
+        .append(")");
+    buffer.append(" ").append(percent2String(cacheUsedPercent));
+    buffer.append(" ").append(cr).append("(").append(StringUtils.byteDesc(cr))
+        .append(")");
+    buffer.append(" ").append(new Date(lastUpdate));
     return buffer.toString();
   }
 

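Unlike the toString() changes earlier in this patch, these report builders already have a StringBuilder in hand, so the rewrite goes the other direction: passing "a" + b to append() first builds a temporary String through a hidden builder, while chaining append() calls writes straight into the existing buffer. A minimal sketch of the difference, with invented names:

    // Hypothetical report method; only the append-chaining idiom matters.
    class ReportExample {
      static String report(String name, long capacity) {
        StringBuilder buffer = new StringBuilder();
        // buffer.append("Name: " + name + "\n");          // extra temporary String
        buffer.append("Name: ").append(name).append("\n"); // appends directly
        buffer.append("Configured Capacity: ").append(capacity).append("\n");
        return buffer.toString();
      }
    }
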
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
index 18c940b..7c1143b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.protocol;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -29,7 +28,7 @@ public class DatanodeInfoWithStorage extends DatanodeInfo {
   private final StorageType storageType;
 
   public DatanodeInfoWithStorage(DatanodeInfo from, String storageID,
-                                 StorageType storageType) {
+      StorageType storageType) {
     super(from);
     this.storageID = storageID;
     this.storageType = storageType;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
index b7b2289..8bff150 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
@@ -55,10 +55,8 @@ public class DatanodeLocalInfo {
 
   /** A formatted string for printing the status of the DataNode. */
   public String getDatanodeLocalReport() {
-    StringBuilder buffer = new StringBuilder();
-    buffer.append("Uptime: " + getUptime());
-    buffer.append(", Software version: " + getSoftwareVersion());
-    buffer.append(", Config version: " + getConfigVersion());
-    return buffer.toString();
+    return ("Uptime: " + getUptime())
+        + ", Software version: " + getSoftwareVersion()
+        + ", Config version: " + getConfigVersion();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
index a3cff82..eb6a0c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
@@ -39,7 +39,7 @@ public class EncryptionZoneIterator
   private final Tracer tracer;
 
   public EncryptionZoneIterator(ClientProtocol namenode, Tracer tracer) {
-    super(Long.valueOf(0));
+    super((long) 0);
     this.namenode = namenode;
     this.tracer = tracer;
   }
@@ -47,11 +47,8 @@ public class EncryptionZoneIterator
   @Override
   public BatchedEntries<EncryptionZone> makeRequest(Long prevId)
       throws IOException {
-    TraceScope scope = tracer.newScope("listEncryptionZones");
-    try {
+    try (TraceScope ignored = tracer.newScope("listEncryptionZones")) {
       return namenode.listEncryptionZones(prevId);
-    } finally {
-      scope.close();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 34f429a..5a5b5f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -33,7 +33,8 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 @InterfaceStability.Evolving
 public class HdfsFileStatus {
 
-  private final byte[] path;  // local name of the inode that's encoded in java UTF8
+  // local name of the inode that's encoded in java UTF8
+  private final byte[] path;
   private final byte[] symlink; // symlink target encoded in java UTF8 or null
   private final long length;
   private final boolean isdir;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
index 23e8f57..62de6f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 
-/** 
+/**
  * Interface that represents the over the wire information
  * including block locations for a file.
  */
@@ -38,7 +38,7 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
 
   /**
    * Constructor
-   * 
+   *
    * @param length size
    * @param isdir if this is directory
    * @param block_replication the file's replication factor
@@ -49,7 +49,7 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
    * @param owner owner
    * @param group group
    * @param symlink symbolic link
-   * @param path local path name in java UTF8 format 
+   * @param path local path name in java UTF8 format
    * @param fileId the file id
    * @param locations block locations
    * @param feInfo file encryption info

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
index 1cd80f9..b9c8b96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
@@ -31,7 +31,8 @@ public class LastBlockWithStatus {
 
   private final HdfsFileStatus fileStatus;
 
-  public LastBlockWithStatus(LocatedBlock lastBlock, HdfsFileStatus fileStatus) {
+  public LastBlockWithStatus(LocatedBlock lastBlock,
+      HdfsFileStatus fileStatus) {
     this.lastBlock = lastBlock;
     this.fileStatus = fileStatus;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index cc13f10..7ddfb8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -65,13 +65,13 @@ public class LocatedBlock {
   }
 
   public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs,
-                      String[] storageIDs, StorageType[] storageTypes) {
+      String[] storageIDs, StorageType[] storageTypes) {
     this(b, locs, storageIDs, storageTypes, -1, false, EMPTY_LOCS);
   }
 
   public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, String[] storageIDs,
-                      StorageType[] storageTypes, long startOffset,
-                      boolean corrupt, DatanodeInfo[] cachedLocs) {
+      StorageType[] storageTypes, long startOffset,
+      boolean corrupt, DatanodeInfo[] cachedLocs) {
     this.b = b;
     this.offset = startOffset;
     this.corrupt = corrupt;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
index e4896977..8c79253 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
@@ -32,7 +32,8 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
 @InterfaceStability.Evolving
 public class LocatedBlocks {
   private final long fileLength;
-  private final List<LocatedBlock> blocks; // array of blocks with prioritized locations
+  // array of blocks with prioritized locations
+  private final List<LocatedBlock> blocks;
   private final boolean underConstruction;
   private final LocatedBlock lastLocatedBlock;
   private final boolean isLastBlockComplete;
@@ -123,22 +124,22 @@ public class LocatedBlocks {
     key.setStartOffset(offset);
     key.getBlock().setNumBytes(1);
     Comparator<LocatedBlock> comp =
-      new Comparator<LocatedBlock>() {
-        // Returns 0 iff a is inside b or b is inside a
-        @Override
-        public int compare(LocatedBlock a, LocatedBlock b) {
-          long aBeg = a.getStartOffset();
-          long bBeg = b.getStartOffset();
-          long aEnd = aBeg + a.getBlockSize();
-          long bEnd = bBeg + b.getBlockSize();
-          if(aBeg <= bBeg && bEnd <= aEnd
-              || bBeg <= aBeg && aEnd <= bEnd)
-            return 0; // one of the blocks is inside the other
-          if(aBeg < bBeg)
-            return -1; // a's left bound is to the left of the b's
-          return 1;
-        }
-      };
+        new Comparator<LocatedBlock>() {
+          // Returns 0 iff a is inside b or b is inside a
+          @Override
+          public int compare(LocatedBlock a, LocatedBlock b) {
+            long aBeg = a.getStartOffset();
+            long bBeg = b.getStartOffset();
+            long aEnd = aBeg + a.getBlockSize();
+            long bEnd = bBeg + b.getBlockSize();
+            if(aBeg <= bBeg && bEnd <= aEnd
+                || bBeg <= aBeg && aEnd <= bEnd)
+              return 0; // one of the blocks is inside the other
+            if(aBeg < bBeg)
+              return -1; // a's left bound is to the left of the b's
+            return 1;
+          }
+        };
     return Collections.binarySearch(blocks, key, comp);
   }
 
@@ -176,14 +177,10 @@ public class LocatedBlocks {
 
   @Override
   public String toString() {
-    final StringBuilder b = new StringBuilder(getClass().getSimpleName());
-    b.append("{")
-     .append("\n  fileLength=").append(fileLength)
-     .append("\n  underConstruction=").append(underConstruction)
-     .append("\n  blocks=").append(blocks)
-     .append("\n  lastLocatedBlock=").append(lastLocatedBlock)
-     .append("\n  isLastBlockComplete=").append(isLastBlockComplete)
-     .append("}");
-    return b.toString();
+    return getClass().getSimpleName() + "{" + "\n  fileLength=" + fileLength
+        + "\n  underConstruction=" + underConstruction
+        + "\n  blocks=" + blocks
+        + "\n  lastLocatedBlock=" + lastLocatedBlock
+        + "\n  isLastBlockComplete=" + isLastBlockComplete + "}";
   }
 }

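The re-indented comparator above returns 0 when one block's byte range contains the other's, which is what lets Collections.binarySearch locate the block covering a given file offset: the probe key is a one-byte range starting at that offset, over a list sorted by start offset. A standalone sketch of the same lookup on a simplified Range type (names here are illustrative, not the HDFS API):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    class BlockSearchExample {
      static final class Range {
        final long start, size;
        Range(long start, long size) { this.start = start; this.size = size; }
      }

      // Returns the index of the range containing 'offset', or a negative
      // insertion point if none does (binarySearch contract).
      static int findBlock(List<Range> blocks, long offset) {
        Range key = new Range(offset, 1);                 // one-byte probe
        Comparator<Range> comp = (a, b) -> {
          long aEnd = a.start + a.size, bEnd = b.start + b.size;
          if ((a.start <= b.start && bEnd <= aEnd)
              || (b.start <= a.start && aEnd <= bEnd)) {
            return 0;                                     // one range inside the other
          }
          return a.start < b.start ? -1 : 1;
        };
        return Collections.binarySearch(blocks, key, comp);
      }

      public static void main(String[] args) {
        List<Range> blocks = Arrays.asList(new Range(0, 128), new Range(128, 128));
        System.out.println(findBlock(blocks, 130));       // prints 1
      }
    }
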
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1257483e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
index eeedd5a..68ac58e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
@@ -43,7 +43,7 @@ public final class NSQuotaExceededException extends QuotaExceededException {
     String msg = super.getMessage();
     if (msg == null) {
       msg = "The NameSpace quota (directories and files)" +
-      (pathName==null?"":(" of directory " + pathName)) +
+          (pathName==null?"":(" of directory " + pathName)) +
           " is exceeded: quota=" + quota + " file count=" + count;
 
       if (prefix != null) {
