HADOOP-15785. [JDK10] Javadoc build fails on JDK 10 in hadoop-common. 
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b57f2f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b57f2f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b57f2f7

Branch: refs/heads/HDFS-12943
Commit: 7b57f2f71fbaa5af4897309597cca70a95b04edd
Parents: f261c31
Author: Takanobu Asanuma <tasan...@apache.org>
Authored: Thu Oct 11 13:51:51 2018 +0900
Committer: Takanobu Asanuma <tasan...@apache.org>
Committed: Thu Oct 11 13:51:51 2018 +0900

----------------------------------------------------------------------
 .../org/apache/hadoop/conf/Configurable.java    | 10 +++-
 .../org/apache/hadoop/conf/Configuration.java   | 45 +++++++++--------
 .../hadoop/conf/ConfigurationWithLogging.java   | 14 +++---
 .../org/apache/hadoop/crypto/CryptoCodec.java   |  7 +--
 .../apache/hadoop/crypto/CryptoInputStream.java |  4 +-
 .../hadoop/crypto/CryptoOutputStream.java       |  4 +-
 .../org/apache/hadoop/crypto/Decryptor.java     | 14 +++---
 .../org/apache/hadoop/crypto/Encryptor.java     | 14 +++---
 .../org/apache/hadoop/crypto/OpensslCipher.java | 18 +++----
 .../hadoop/crypto/key/JavaKeyStoreProvider.java | 15 +++---
 .../apache/hadoop/crypto/key/KeyProvider.java   |  6 +--
 .../crypto/key/KeyProviderCryptoExtension.java  |  9 ++--
 .../KeyProviderDelegationTokenExtension.java    |  2 +-
 .../crypto/key/kms/KMSClientProvider.java       |  6 +--
 .../hadoop/crypto/key/kms/ValueQueue.java       |  2 +-
 .../crypto/random/OpensslSecureRandom.java      | 10 ++--
 .../apache/hadoop/fs/AbstractFileSystem.java    | 45 +++++++++--------
 .../apache/hadoop/fs/BufferedFSInputStream.java |  4 +-
 .../apache/hadoop/fs/ByteBufferReadable.java    |  8 +--
 .../apache/hadoop/fs/ChecksumFileSystem.java    |  2 +-
 .../java/org/apache/hadoop/fs/ChecksumFs.java   |  2 +-
 .../hadoop/fs/CommonConfigurationKeys.java      |  2 +-
 .../java/org/apache/hadoop/fs/CreateFlag.java   |  2 +-
 .../org/apache/hadoop/fs/FSDataInputStream.java |  1 +
 .../org/apache/hadoop/fs/FSInputChecker.java    |  2 +-
 .../java/org/apache/hadoop/fs/FileContext.java  | 51 +++++++++++---------
 .../java/org/apache/hadoop/fs/FileSystem.java   | 11 ++---
 .../java/org/apache/hadoop/fs/FileUtil.java     |  4 +-
 .../org/apache/hadoop/fs/HarFileSystem.java     |  2 +-
 .../hadoop/fs/HasEnhancedByteBufferAccess.java  | 15 +++---
 .../org/apache/hadoop/fs/LocalDirAllocator.java |  3 +-
 .../org/apache/hadoop/fs/LocalFileSystem.java   |  2 +-
 .../main/java/org/apache/hadoop/fs/Options.java |  2 +-
 .../java/org/apache/hadoop/fs/QuotaUsage.java   |  8 +--
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java |  6 +--
 .../org/apache/hadoop/fs/ftp/FtpConfigKeys.java |  1 +
 .../apache/hadoop/fs/local/LocalConfigKeys.java |  1 +
 .../apache/hadoop/fs/permission/AclStatus.java  |  2 +-
 .../apache/hadoop/fs/permission/AclUtil.java    | 10 ++--
 .../hadoop/fs/permission/ScopedAclEntries.java  | 14 +++---
 .../org/apache/hadoop/fs/shell/Command.java     | 24 ++++-----
 .../apache/hadoop/fs/shell/CommandFormat.java   |  2 +-
 .../org/apache/hadoop/fs/viewfs/ViewFs.java     | 12 ++---
 .../apache/hadoop/ha/ActiveStandbyElector.java  | 32 ++++++------
 .../main/java/org/apache/hadoop/ha/HAAdmin.java |  2 +-
 .../java/org/apache/hadoop/ha/NodeFencer.java   |  2 +-
 .../org/apache/hadoop/ha/SshFenceByTcpPort.java |  2 +-
 .../org/apache/hadoop/http/HttpServer2.java     | 10 ++--
 .../org/apache/hadoop/io/EnumSetWritable.java   |  2 +-
 .../main/java/org/apache/hadoop/io/IOUtils.java |  2 +-
 .../org/apache/hadoop/io/ReadaheadPool.java     |  2 +-
 .../org/apache/hadoop/io/SecureIOUtils.java     |  4 +-
 .../java/org/apache/hadoop/io/SequenceFile.java | 16 +++---
 .../java/org/apache/hadoop/io/Writable.java     |  4 +-
 .../apache/hadoop/io/WritableComparable.java    |  7 +--
 .../org/apache/hadoop/io/WritableUtils.java     |  6 ++-
 .../io/compress/CompressionCodecFactory.java    |  8 +--
 .../org/apache/hadoop/io/compress/Lz4Codec.java |  4 +-
 .../apache/hadoop/io/compress/SnappyCodec.java  |  2 +-
 .../io/compress/bzip2/Bzip2Compressor.java      |  2 +-
 .../io/compress/bzip2/Bzip2Decompressor.java    |  6 +--
 .../hadoop/io/compress/bzip2/Bzip2Factory.java  |  4 +-
 .../io/compress/bzip2/CBZip2InputStream.java    | 26 +++++-----
 .../io/compress/bzip2/CBZip2OutputStream.java   |  7 +--
 .../compress/zlib/BuiltInGzipDecompressor.java  |  8 +--
 .../hadoop/io/compress/zlib/ZlibCompressor.java |  2 +-
 .../io/compress/zlib/ZlibDecompressor.java      |  6 +--
 .../hadoop/io/compress/zlib/ZlibFactory.java    |  4 +-
 .../apache/hadoop/io/erasurecode/CodecUtil.java |  2 +-
 .../erasurecode/rawcoder/util/GaloisField.java  |  2 +-
 .../org/apache/hadoop/io/file/tfile/TFile.java  |  8 +--
 .../org/apache/hadoop/io/file/tfile/Utils.java  | 48 ++++++++++--------
 .../org/apache/hadoop/io/retry/RetryProxy.java  |  4 +-
 .../hadoop/io/serializer/Deserializer.java      |  2 +-
 .../apache/hadoop/io/serializer/Serializer.java |  2 +-
 .../org/apache/hadoop/ipc/CallerContext.java    |  2 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |  9 ++--
 .../java/org/apache/hadoop/ipc/ClientCache.java |  6 +--
 .../apache/hadoop/ipc/DecayRpcScheduler.java    |  3 +-
 .../org/apache/hadoop/ipc/RefreshHandler.java   |  1 -
 .../org/apache/hadoop/ipc/RemoteException.java  |  2 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |  2 +-
 .../org/apache/hadoop/jmx/JMXJsonServlet.java   |  6 +--
 .../apache/hadoop/log/LogThrottlingHelper.java  |  8 +--
 .../metrics2/lib/MutableRollingAverages.java    |  2 +-
 .../apache/hadoop/metrics2/package-info.java    |  3 ++
 .../metrics2/sink/RollingFileSystemSink.java    |  2 +-
 .../apache/hadoop/metrics2/sink/StatsDSink.java |  4 +-
 .../org/apache/hadoop/metrics2/util/MBeans.java | 10 ++--
 .../hadoop/net/AbstractDNSToSwitchMapping.java  |  8 +--
 .../main/java/org/apache/hadoop/net/DNS.java    |  2 +-
 .../apache/hadoop/net/DNSToSwitchMapping.java   |  2 +-
 .../java/org/apache/hadoop/net/NetUtils.java    | 14 +++---
 .../org/apache/hadoop/net/NetworkTopology.java  | 12 ++---
 .../net/NetworkTopologyWithNodeGroup.java       |  4 +-
 .../apache/hadoop/net/ScriptBasedMapping.java   |  8 +--
 .../net/ScriptBasedMappingWithDependency.java   |  7 ++-
 .../apache/hadoop/net/SocketOutputStream.java   |  6 +--
 .../AuthenticationFilterInitializer.java        |  6 +--
 .../hadoop/security/HadoopKerberosName.java     |  2 +-
 .../security/IdMappingServiceProvider.java      |  5 +-
 .../apache/hadoop/security/SaslRpcClient.java   | 10 ++--
 .../apache/hadoop/security/SecurityUtil.java    |  3 +-
 .../hadoop/security/UserGroupInformation.java   |  2 +-
 .../security/alias/JavaKeyStoreProvider.java    |  6 +--
 .../alias/LocalJavaKeyStoreProvider.java        |  3 +-
 .../hadoop/security/authorize/ProxyUsers.java   |  4 +-
 .../security/ssl/FileBasedKeyStoresFactory.java |  4 +-
 .../apache/hadoop/security/ssl/SSLFactory.java  |  4 +-
 .../security/ssl/SSLHostnameVerifier.java       | 20 ++++----
 .../web/DelegationTokenAuthenticatedURL.java    | 14 +++---
 .../DelegationTokenAuthenticationFilter.java    |  6 +--
 .../DelegationTokenAuthenticationHandler.java   |  4 +-
 .../delegation/web/DelegationTokenManager.java  |  2 +-
 ...rosDelegationTokenAuthenticationHandler.java |  2 +-
 .../KerberosDelegationTokenAuthenticator.java   |  2 +-
 ...emeDelegationTokenAuthenticationHandler.java |  3 +-
 ...udoDelegationTokenAuthenticationHandler.java |  2 +-
 .../web/PseudoDelegationTokenAuthenticator.java |  2 +-
 .../hadoop/service/ServiceOperations.java       |  4 +-
 .../service/launcher/ServiceLauncher.java       |  2 +-
 .../hadoop/service/launcher/package-info.java   |  2 +-
 .../java/org/apache/hadoop/util/ClassUtil.java  |  1 -
 .../apache/hadoop/util/ComparableVersion.java   |  2 +-
 .../java/org/apache/hadoop/util/FindClass.java  |  4 +-
 .../hadoop/util/GenericOptionsParser.java       | 21 ++++----
 .../apache/hadoop/util/HttpExceptionUtils.java  |  6 +--
 .../apache/hadoop/util/JsonSerialization.java   |  2 +-
 .../apache/hadoop/util/LightWeightCache.java    | 10 ++--
 .../java/org/apache/hadoop/util/LineReader.java |  3 --
 .../org/apache/hadoop/util/MachineList.java     |  2 +-
 .../apache/hadoop/util/ShutdownHookManager.java |  2 +-
 .../hadoop/util/ShutdownThreadsHelper.java      |  2 -
 .../org/apache/hadoop/util/StringUtils.java     |  6 +--
 .../main/java/org/apache/hadoop/util/Tool.java  |  2 +-
 .../java/org/apache/hadoop/util/ZKUtil.java     |  4 +-
 .../hadoop/util/bloom/DynamicBloomFilter.java   |  5 +-
 .../apache/hadoop/util/concurrent/AsyncGet.java |  2 +-
 .../apache/hadoop/util/hash/JenkinsHash.java    |  4 +-
 .../org/apache/hadoop/cli/util/CLICommand.java  |  6 +--
 .../apache/hadoop/cli/util/CLICommandFS.java    |  4 +-
 .../apache/hadoop/cli/util/CLICommandTypes.java |  6 +--
 .../rawcoder/RawErasureCoderBenchmark.java      |  4 +-
 .../hadoop/io/retry/TestDefaultRetryPolicy.java |  4 +-
 .../org/apache/hadoop/net/StaticMapping.java    |  4 +-
 .../apache/hadoop/tracing/SetSpanReceiver.java  |  2 +-
 146 files changed, 518 insertions(+), 483 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configurable.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configurable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configurable.java
index d847f29..5816039 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configurable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configurable.java
@@ -26,9 +26,15 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceStability.Stable
 public interface Configurable {
 
-  /** Set the configuration to be used by this object. */
+  /**
+   * Set the configuration to be used by this object.
+   * @param conf configuration to be used
+   */
   void setConf(Configuration conf);
 
-  /** Return the configuration used by this object. */
+  /**
+   * Return the configuration used by this object.
+   * @return Configuration
+   */
   Configuration getConf();
 }

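As an aside, the setConf()/getConf() contract documented above amounts to the minimal sketch below (ConfiguredComponent is a hypothetical class, not part of this patch):

    import org.apache.hadoop.conf.Configurable;
    import org.apache.hadoop.conf.Configuration;

    public class ConfiguredComponent implements Configurable {
      private Configuration conf;

      @Override
      public void setConf(Configuration conf) {
        this.conf = conf;        // configuration to be used by this object
      }

      @Override
      public Configuration getConf() {
        return conf;             // the Configuration set above
      }
    }
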
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index c004cb5..478e56e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -115,7 +115,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
 /**
  * Provides access to configuration parameters.
  *
- * <h4 id="Resources">Resources</h4>
+ * <h3 id="Resources">Resources</h3>
  *
  * <p>Configurations are specified by resources. A resource contains a set of
  * name/value pairs as XML data. Each resource is named by either a 
@@ -141,12 +141,12 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
  * Once a resource declares a value final, no subsequently-loaded 
  * resource can alter that value.  
  * For example, one might define a final parameter with:
- * <tt><pre>
+ * <pre><code>
  *  &lt;property&gt;
  *    &lt;name&gt;dfs.hosts.include&lt;/name&gt;
  *    &lt;value&gt;/etc/hadoop/conf/hosts.include&lt;/value&gt;
  *    <b>&lt;final&gt;true&lt;/final&gt;</b>
- *  &lt;/property&gt;</pre></tt>
+ *  &lt;/property&gt;</code></pre>
  *
  * Administrators typically define parameters as final in 
  * <tt>core-site.xml</tt> for values that user applications may not alter.
@@ -164,7 +164,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
  *
  * <p>For example, if a configuration resource contains the following property
  * definitions: 
- * <tt><pre>
+ * <pre><code>
  *  &lt;property&gt;
  *    &lt;name&gt;basedir&lt;/name&gt;
  *    &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
@@ -179,7 +179,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
  *    &lt;name&gt;otherdir&lt;/name&gt;
  *    &lt;value&gt;${<i>env.BASE_DIR</i>}/other&lt;/value&gt;
  *  &lt;/property&gt;
- *  </pre></tt>
+ *  </code></pre>
  *
 * <p>When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
  * will be resolved to another property in this Configuration, while
@@ -203,7 +203,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
 * can define their own custom tags in the hadoop.tags.custom property.
  *
  * <p>For example, we can tag existing property as:
- * <tt><pre>
+ * <pre><code>
  *  &lt;property&gt;
  *    &lt;name&gt;dfs.replication&lt;/name&gt;
  *    &lt;value&gt;3&lt;/value&gt;
@@ -215,7 +215,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
  *    &lt;value&gt;3&lt;/value&gt;
  *    &lt;tag&gt;HDFS,SECURITY&lt;/tag&gt;
  *  &lt;/property&gt;
- * </pre></tt>
+ * </code></pre>
  * <p> Properties marked with tags can be retrieved with <tt>conf
  * .getAllPropertiesByTag("HDFS")</tt> or <tt>conf.getAllPropertiesByTags
  * (Arrays.asList("YARN","SECURITY"))</tt>.</p>
@@ -581,9 +581,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * If you have multiple deprecation entries to add, it is more efficient to
    * use #addDeprecations(DeprecationDelta[] deltas) instead.
    * 
-   * @param key
-   * @param newKeys
-   * @param customMessage
+   * @param key to be deprecated
+   * @param newKeys list of keys that take up the values of the deprecated key
+   * @param customMessage deprecation message
    * @deprecated use {@link #addDeprecation(String key, String newKey,
       String customMessage)} instead
    */
@@ -605,9 +605,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * If you have multiple deprecation entries to add, it is more efficient to
    * use #addDeprecations(DeprecationDelta[] deltas) instead.
    *
-   * @param key
-   * @param newKey
-   * @param customMessage
+   * @param key to be deprecated
+   * @param newKey key that takes up the values of the deprecated key
+   * @param customMessage deprecation message
    */
   public static void addDeprecation(String key, String newKey,
              String customMessage) {
@@ -1428,6 +1428,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 
   /**
    * Unset a previously set property.
+   * @param name the property name
    */
   public synchronized void unset(String name) {
     String[] names = null;
@@ -1717,6 +1718,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * is equivalent to <code>set(&lt;name&gt;, value.toString())</code>.
    * @param name property name
    * @param value new value
+   * @param <T> enumeration type
    */
   public <T extends Enum<T>> void setEnum(String name, T value) {
     set(name, value.toString());
@@ -1727,8 +1729,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * Note that the returned value is trimmed by this method.
    * @param name Property name
    * @param defaultValue Value returned if no mapping exists
+   * @param <T> enumeration type
    * @throws IllegalArgumentException If mapping is illegal for the type
    * provided
+   * @return enumeration type
    */
   public <T extends Enum<T>> T getEnum(String name, T defaultValue) {
     final String val = getTrimmed(name);
@@ -1807,6 +1811,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @param unit Unit to convert the stored property, if it exists.
    * @throws NumberFormatException If the property stripped of its unit is not
    *         a number
+   * @return time duration in given time unit
    */
   public long getTimeDuration(String name, long defaultValue, TimeUnit unit) {
     String vStr = get(name);
@@ -2299,6 +2304,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   * the CredentialProvider API and conditionally falls back to config.
    * @param name property name
    * @return password
+   * @throws IOException if there is an error fetching the password
    */
   public char[] getPassword(String name) throws IOException {
     char[] pass = null;
@@ -2358,7 +2364,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * alias.
    * @param name alias of the provisioned credential
    * @return password or null if not found
-   * @throws IOException
+   * @throws IOException if there is an error fetching the password
    */
   public char[] getPasswordFromCredentialProviders(String name)
       throws IOException {
@@ -3425,25 +3431,23 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   /**
    * Write out the non-default properties in this configuration to the
    * given {@link Writer}.
-   *
+   * <ul>
    * <li>
    * When property name is not empty and the property exists in the
    * configuration, this method writes the property and its attributes
    * to the {@link Writer}.
    * </li>
-   * <p>
    *
    * <li>
    * When property name is null or empty, this method writes all the
    * configuration properties and their attributes to the {@link Writer}.
    * </li>
-   * <p>
    *
    * <li>
    * When property name is not empty but the property doesn't exist in
    * the configuration, this method throws an {@link IllegalArgumentException}.
    * </li>
-   * <p>
+   * </ul>
    * @param out the writer to write to.
    */
   public void writeXml(String propertyName, Writer out)
@@ -3553,7 +3557,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   /**
    *  Writes properties and their attributes (final and resource)
    *  to the given {@link Writer}.
-   *
+   *  <ul>
    *  <li>
    *  When propertyName is not empty, and the property exists
    *  in the configuration, the format of the output would be,
@@ -3593,6 +3597,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *  found in the configuration, this method will throw an
    *  {@link IllegalArgumentException}.
    *  </li>
+   *  </ul>
    *  <p>
    * @param config the configuration
    * @param propertyName property name
@@ -3791,7 +3796,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   /**
   * get keys matching the regex
    * @param regex
-   * @return Map<String,String> with matching keys
+   * @return {@literal Map<String,String>} with matching keys
    */
   public Map<String,String> getValByRegex(String regex) {
     Pattern p = Pattern.compile(regex);

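To make the documented getters concrete, here is a minimal sketch (the property names are illustrative, not real Hadoop keys):

    import java.util.Map;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public class ConfigurationSketch {
      enum Codec { LZ4, SNAPPY }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("example.codec", "LZ4");
        conf.set("example.timeout", "30s");

        // getEnum() maps the trimmed string value onto an enum constant.
        Codec codec = conf.getEnum("example.codec", Codec.SNAPPY);

        // getTimeDuration() parses the unit suffix and converts to the
        // requested TimeUnit (30000 here).
        long ms = conf.getTimeDuration("example.timeout", 10000L,
            TimeUnit.MILLISECONDS);

        // getValByRegex() returns a Map<String,String> of matching keys.
        Map<String, String> matches = conf.getValByRegex("^example\\..*");
      }
    }
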
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigurationWithLogging.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigurationWithLogging.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigurationWithLogging.java
index 8a5e054..68c5172 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigurationWithLogging.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigurationWithLogging.java
@@ -41,7 +41,7 @@ public class ConfigurationWithLogging extends Configuration {
   }
 
   /**
-   * @see Configuration#get(String).
+   * See {@link Configuration#get(String)}.
    */
   @Override
   public String get(String name) {
@@ -51,7 +51,7 @@ public class ConfigurationWithLogging extends Configuration {
   }
 
   /**
-   * @see Configuration#get(String, String).
+   * See {@link Configuration#get(String, String)}.
    */
   @Override
   public String get(String name, String defaultValue) {
@@ -62,7 +62,7 @@ public class ConfigurationWithLogging extends Configuration {
   }
 
   /**
-   * @see Configuration#getBoolean(String, boolean).
+   * See {@link Configuration#getBoolean(String, boolean)}.
    */
   @Override
   public boolean getBoolean(String name, boolean defaultValue) {
@@ -72,7 +72,7 @@ public class ConfigurationWithLogging extends Configuration {
   }
 
   /**
-   * @see Configuration#getFloat(String, float).
+   * See {@link Configuration#getFloat(String, float)}.
    */
   @Override
   public float getFloat(String name, float defaultValue) {
@@ -82,7 +82,7 @@ public class ConfigurationWithLogging extends Configuration {
   }
 
   /**
-   * @see Configuration#getInt(String, int).
+   * See {@link Configuration#getInt(String, int)}.
    */
   @Override
   public int getInt(String name, int defaultValue) {
@@ -92,7 +92,7 @@ public class ConfigurationWithLogging extends Configuration {
   }
 
   /**
-   * @see Configuration#getLong(String, long).
+   * See {@link Configuration#getLong(String, long)}.
    */
   @Override
   public long getLong(String name, long defaultValue) {
@@ -102,7 +102,7 @@ public class ConfigurationWithLogging extends Configuration {
   }
 
   /**
-   * @see Configuration#set(String, String, String).
+   * See {@link Configuration#set(String, String, String)}.
    */
   @Override
   public void set(String name, String value, String source) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java
index d9c16bb..bcf4a65 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java
@@ -158,14 +158,15 @@ public abstract class CryptoCodec implements Configurable, Closeable {
    * For example a {@link javax.crypto.Cipher} will maintain its encryption 
    * context internally when we do encryption/decryption using the 
    * Cipher#update interface. 
-   * <p/>
+   * <p>
    * Encryption/Decryption is not always on the entire file. For example,
    * in Hadoop, a node may only decrypt a portion of a file (i.e. a split).
    * In these situations, the counter is derived from the file position.
-   * <p/>
+   * <p>
    * The IV can be calculated by combining the initial IV and the counter with 
    * a lossless operation (concatenation, addition, or XOR).
-   * @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_.28CTR.29
+   * See http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_
+   * .28CTR.29
    * 
    * @param initIV initial IV
    * @param counter counter for input stream position 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
index a2273bf..5c879ec 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -53,10 +53,10 @@ import org.apache.hadoop.util.StringUtils;
  * required in order to ensure that the plain text and cipher text have a 1:1
  * mapping. The decryption is buffer based. The key points of the decryption
  * are (1) calculating the counter and (2) padding through stream position:
- * <p/>
+ * <p>
  * counter = base + pos/(algorithm blocksize); 
  * padding = pos%(algorithm blocksize); 
- * <p/>
+ * <p>
  * The underlying stream offset is maintained as state.
  */
 @InterfaceAudience.Private

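Worked instance of the counter/padding formulas above (values are illustrative; AES has a 16-byte block):

    public class CtrPositionMath {
      public static void main(String[] args) {
        long base = 0L;       // counter value encoded in the initial IV
        long pos = 100L;      // plain-text stream position
        int blockSize = 16;   // algorithm block size

        long counter = base + pos / blockSize;  // 6: the block containing pos
        int padding = (int) (pos % blockSize);  // 4: offset into that block
      }
    }
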
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index 2f347c5..8d11043 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -36,10 +36,10 @@ import com.google.common.base.Preconditions;
  * required in order to ensure that the plain text and cipher text have a 1:1
 * mapping. The encryption is buffer based. The key points of the encryption are
  * (1) calculating counter and (2) padding through stream position.
- * <p/>
+ * <p>
  * counter = base + pos/(algorithm blocksize); 
  * padding = pos%(algorithm blocksize); 
- * <p/>
+ * <p>
  * The underlying stream offset is maintained as state.
  *
 * Note that while some of this class' methods are synchronized, this is just to

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java
index 9958415..7556f18 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java
@@ -38,7 +38,7 @@ public interface Decryptor {
   
   /**
    * Indicate whether the decryption context is reset.
-   * <p/>
+   * <p>
    * Certain modes, like CTR, require a different IV depending on the 
    * position in the stream. Generally, the decryptor maintains any necessary
    * context for calculating the IV and counter so that no reinit is necessary 
@@ -49,22 +49,22 @@ public interface Decryptor {
   
   /**
    * This presents a direct interface decrypting with direct ByteBuffers.
-   * <p/>
+   * <p>
   * This function does not always decrypt the entire buffer and may potentially
    * need to be called multiple times to process an entire buffer. The object 
    * may hold the decryption context internally.
-   * <p/>
+   * <p>
    * Some implementations may require sufficient space in the destination 
    * buffer to decrypt the entire input buffer.
-   * <p/>
+   * <p>
    * Upon return, inBuffer.position() will be advanced by the number of bytes
    * read and outBuffer.position() by bytes written. Implementations should 
    * not modify inBuffer.limit() and outBuffer.limit().
-   * <p/>
+   * <p>
    * @param inBuffer a direct {@link ByteBuffer} to read from. inBuffer may 
-   * not be null and inBuffer.remaining() must be > 0
+   * not be null and inBuffer.remaining() must be {@literal >} 0
    * @param outBuffer a direct {@link ByteBuffer} to write to. outBuffer may 
-   * not be null and outBuffer.remaining() must be > 0
+   * not be null and outBuffer.remaining() must be {@literal >} 0
    * @throws IOException if decryption fails
    */
   public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) 

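The documented contract implies a drain loop on the caller's side; a sketch, assuming an already-initialized Decryptor and direct buffers:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.crypto.Decryptor;

    public class DecryptLoop {
      static void drain(Decryptor decryptor, ByteBuffer inBuffer,
          ByteBuffer outBuffer) throws IOException {
        // decrypt() may consume only part of inBuffer, so loop until drained;
        // each call advances inBuffer.position() and outBuffer.position().
        while (inBuffer.remaining() > 0) {
          decryptor.decrypt(inBuffer, outBuffer);
        }
        outBuffer.flip();  // plain text is now readable from outBuffer
      }
    }
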
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java
index 6dc3cfb..faeb176 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java
@@ -37,7 +37,7 @@ public interface Encryptor {
   
   /**
    * Indicate whether the encryption context is reset.
-   * <p/>
+   * <p>
    * Certain modes, like CTR, require a different IV depending on the
    * position in the stream. Generally, the encryptor maintains any necessary
    * context for calculating the IV and counter so that no reinit is necessary
@@ -48,22 +48,22 @@ public interface Encryptor {
   
   /**
    * This presents a direct interface encrypting with direct ByteBuffers.
-   * <p/>
+   * <p>
   * This function does not always encrypt the entire buffer and may potentially
    * need to be called multiple times to process an entire buffer. The object 
    * may hold the encryption context internally.
-   * <p/>
+   * <p>
    * Some implementations may require sufficient space in the destination 
    * buffer to encrypt the entire input buffer.
-   * <p/>
+   * <p>
    * Upon return, inBuffer.position() will be advanced by the number of bytes
    * read and outBuffer.position() by bytes written. Implementations should
    * not modify inBuffer.limit() and outBuffer.limit().
-   * <p/>
+   * <p>
    * @param inBuffer a direct {@link ByteBuffer} to read from. inBuffer may 
-   * not be null and inBuffer.remaining() must be > 0
+   * not be null and inBuffer.remaining() must be &gt; 0
    * @param outBuffer a direct {@link ByteBuffer} to write to. outBuffer may 
-   * not be null and outBuffer.remaining() must be > 0
+   * not be null and outBuffer.remaining() must be &gt; 0
    * @throws IOException if encryption fails
    */
   public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
index 133a9f9..0a2ba52 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
@@ -107,12 +107,12 @@ public final class OpensslCipher {
   }
   
   /**
-   * Return an <code>OpensslCipher<code> object that implements the specified
+   * Return an <code>OpensslCipher</code> object that implements the specified
    * transformation.
    * 
    * @param transformation the name of the transformation, e.g., 
    * AES/CTR/NoPadding.
-   * @return OpensslCipher an <code>OpensslCipher<code> object
+   * @return OpensslCipher an <code>OpensslCipher</code> object
    * @throws NoSuchAlgorithmException if <code>transformation</code> is null, 
    * empty, in an invalid format, or if Openssl doesn't implement the 
    * specified algorithm.
@@ -181,18 +181,18 @@ public final class OpensslCipher {
   /**
    * Continues a multiple-part encryption or decryption operation. The data
    * is encrypted or decrypted, depending on how this cipher was initialized.
-   * <p/>
+   * <p>
    * 
    * All <code>input.remaining()</code> bytes starting at 
    * <code>input.position()</code> are processed. The result is stored in
    * the output buffer.
-   * <p/>
+   * <p>
    * 
    * Upon return, the input buffer's position will be equal to its limit;
    * its limit will not have changed. The output buffer's position will have
    * advanced by n, when n is the value returned by this method; the output
    * buffer's limit will not have changed.
-   * <p/>
+   * <p>
    * 
    * If <code>output.remaining()</code> bytes are insufficient to hold the
    * result, a <code>ShortBufferException</code> is thrown.
@@ -218,21 +218,21 @@ public final class OpensslCipher {
   /**
    * Finishes a multiple-part operation. The data is encrypted or decrypted,
    * depending on how this cipher was initialized.
-   * <p/>
+   * <p>
    * 
   * The result is stored in the output buffer. Upon return, the output buffer's
    * position will have advanced by n, where n is the value returned by this
    * method; the output buffer's limit will not have changed.
-   * <p/>
+   * <p>
    * 
   * If <code>output.remaining()</code> bytes are insufficient to hold the result,
    * a <code>ShortBufferException</code> is thrown.
-   * <p/>
+   * <p>
    * 
    * Upon finishing, this method resets this cipher object to the state it was
   * in when previously initialized. That is, the object is available to encrypt
    * or decrypt more data.
-   * <p/>
+   * <p>
    * 
   * If any exception is thrown, this cipher object needs to be reset before it
    * can be used again.

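The update()/doFinal() semantics described above mirror javax.crypto.Cipher, so the multi-part pattern can be sketched against the JDK API (key and IV below are dummies):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import javax.crypto.Cipher;
    import javax.crypto.spec.IvParameterSpec;
    import javax.crypto.spec.SecretKeySpec;

    public class MultiPartCipherSketch {
      public static void main(String[] args) throws Exception {
        Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
        cipher.init(Cipher.ENCRYPT_MODE,
            new SecretKeySpec(new byte[16], "AES"),
            new IvParameterSpec(new byte[16]));

        ByteBuffer input =
            ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
        ByteBuffer output = ByteBuffer.allocate(64);

        // update(): input.position() advances to input.limit();
        // output.position() advances by the bytes produced.
        int produced = cipher.update(input, output);

        // doFinal(): flushes remaining bytes and resets the cipher, leaving
        // it ready to encrypt or decrypt more data.
        cipher.doFinal(ByteBuffer.allocate(0), output);
      }
    }
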
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
index 5beda0d..7951af5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
@@ -62,23 +62,24 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 /**
  * KeyProvider based on Java's KeyStore file format. The file may be stored in
  * any Hadoop FileSystem using the following name mangling:
- *  jks://h...@nn1.example.com/my/keys.jks -> hdfs://nn1.example.com/my/keys.jks
- *  jks://file/home/owen/keys.jks -> file:///home/owen/keys.jks
- * <p/>
+ *  jks://h...@nn1.example.com/my/keys.jks {@literal ->}
+ *  hdfs://nn1.example.com/my/keys.jks
+ *  jks://file/home/owen/keys.jks {@literal ->} file:///home/owen/keys.jks
+ * <p>
  * If the <code>HADOOP_KEYSTORE_PASSWORD</code> environment variable is set,
  * its value is used as the password for the keystore.
- * <p/>
+ * <p>
 * If the <code>HADOOP_KEYSTORE_PASSWORD</code> environment variable is not set,
  * the password for the keystore is read from file specified in the
 * {@link #KEYSTORE_PASSWORD_FILE_KEY} configuration property. The password file
  * is looked up in Hadoop's configuration directory via the classpath.
- * <p/>
+ * <p>
  * <b>NOTE:</b> Make sure the password in the password file does not have an
  * ENTER at the end, else it won't be valid for the Java KeyStore.
- * <p/>
+ * <p>
 * If neither the environment variable nor the property is set, the password used
  * is 'none'.
- * <p/>
+ * <p>
  * It is expected for encrypted InputFormats and OutputFormats to copy the keys
  * from the original provider into the job's Credentials object, which is
  * accessed via the UserProvider. Therefore, this provider won't be used by

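The name mangling above in practice; a sketch that resolves the Javadoc's example jks:// URI to a provider (assumes KeyProviderFactory.KEY_PROVIDER_PATH is the selection key):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class JksProviderSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // jks://file/home/owen/keys.jks -> file:///home/owen/keys.jks
        conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
            "jks://file/home/owen/keys.jks");
        List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
      }
    }
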
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
index 286312c..2380f7a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
@@ -49,7 +49,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
  * abstraction to separate key storage from users of encryption. It
  * is intended to support getting or storing keys in a variety of ways,
  * including third party bindings.
- * <P/>
+ * <p>
  * <code>KeyProvider</code> implementations must be thread safe.
  */
 @InterfaceAudience.Public
@@ -549,7 +549,7 @@ public abstract class KeyProvider {
   /**
    * Create a new key generating the material for it.
    * The given key must not already exist.
-   * <p/>
+   * <p>
    * This implementation generates the key material and calls the
    * {@link #createKey(String, byte[], Options)} method.
    *
@@ -593,7 +593,7 @@ public abstract class KeyProvider {
 
   /**
    * Roll a new version of the given key generating the material for it.
-   * <p/>
+   * <p>
    * This implementation generates the key material and calls the
    * {@link #rollNewVersion(String, byte[])} method.
    *

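A sketch of the createKey()/rollNewVersion() flow referenced above (the provider instance and key name are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;

    public class KeyLifecycleSketch {
      static void demo(KeyProvider provider) throws Exception {
        KeyProvider.Options options = KeyProvider.options(new Configuration());
        // Generates key material, then calls createKey(String, byte[], Options).
        KeyProvider.KeyVersion v1 = provider.createKey("mykey", options);
        // Generates new material, then calls rollNewVersion(String, byte[]).
        KeyProvider.KeyVersion v2 = provider.rollNewVersion("mykey");
      }
    }
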
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
index 3ee3bd7..00d7a7d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
@@ -149,7 +149,7 @@ public class KeyProviderCryptoExtension extends
      * Derive the initialization vector (IV) for the encryption key from the IV
      * of the encrypted key. This derived IV is used with the encryption key to
      * decrypt the encrypted key.
-     * <p/>
+     * <p>
      * The alternative to this is using the same IV for both the encryption key
      * and the encrypted key. Even a simple symmetric transformation like this
      * improves security by avoiding IV re-use. IVs will also be fairly unique
@@ -195,7 +195,7 @@ public class KeyProviderCryptoExtension extends
      * The generated key material is of the same
      * length as the <code>KeyVersion</code> material of the latest key version
      * of the key and is encrypted using the same cipher.
-     * <p/>
+     * <p>
      * NOTE: The generated key is not stored by the <code>KeyProvider</code>
      * 
      * @param encryptionKeyName
@@ -498,7 +498,7 @@ public class KeyProviderCryptoExtension extends
    * and initialization vector. The generated key material is of the same
    * length as the <code>KeyVersion</code> material and is encrypted using the
    * same cipher.
-   * <p/>
+   * <p>
    * NOTE: The generated key is not stored by the <code>KeyProvider</code>
    *
    * @param encryptionKeyName The latest KeyVersion of this key's material will
@@ -576,7 +576,6 @@ public class KeyProviderCryptoExtension extends
    * NOTE: The generated key is not stored by the <code>KeyProvider</code>
    *
    * @param  ekvs List containing the EncryptedKeyVersion's
-   * @return      The re-encrypted EncryptedKeyVersion's, in the same order.
    * @throws IOException If any EncryptedKeyVersion could not be re-encrypted
    * @throws GeneralSecurityException If any EncryptedKeyVersion could not be
    *                            re-encrypted because of a cryptographic issue.
@@ -589,7 +588,7 @@ public class KeyProviderCryptoExtension extends
   /**
    * Creates a <code>KeyProviderCryptoExtension</code> using a given
    * {@link KeyProvider}.
-   * <p/>
+   * <p>
    * If the given <code>KeyProvider</code> implements the
    * {@link CryptoExtension} interface the <code>KeyProvider</code> itself
    * will provide the extension functionality.

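A sketch of the encrypted-key flow documented above (provider and key name are assumed to exist; note the generated key is not stored by the provider):

    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;

    public class EekSketch {
      static void demo(KeyProvider provider) throws Exception {
        KeyProviderCryptoExtension kpce = KeyProviderCryptoExtension
            .createKeyProviderCryptoExtension(provider);
        // Generate an encrypted key (EEK) under the encryption key "ezKey".
        KeyProviderCryptoExtension.EncryptedKeyVersion eek =
            kpce.generateEncryptedKey("ezKey");
        // Decrypt it back to usable key material.
        KeyProvider.KeyVersion dek = kpce.decryptEncryptedKey(eek);
      }
    }
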
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
index 9212cbc..a63b7d5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
@@ -113,7 +113,7 @@ public class KeyProviderDelegationTokenExtension extends
   /**
    * Creates a <code>KeyProviderDelegationTokenExtension</code> using a given 
    * {@link KeyProvider}.
-   * <p/>
+   * <p>
    * If the given <code>KeyProvider</code> implements the 
    * {@link DelegationTokenExtension} interface the <code>KeyProvider</code> 
    * itself will provide the extension functionality, otherwise a default 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 8125510..26b528c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -246,12 +246,12 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 
     /**
      * This provider expects URIs in the following form :
-     * kms://<PROTO>@<AUTHORITY>/<PATH>
+     * {@literal kms://<PROTO>@<AUTHORITY>/<PATH>}
      *
      * where :
      * - PROTO = http or https
-     * - AUTHORITY = <HOSTS>[:<PORT>]
-     * - HOSTS = <HOSTNAME>[;<HOSTS>]
+     * - AUTHORITY = {@literal <HOSTS>[:<PORT>]}
+     * - HOSTS = {@literal <HOSTNAME>[;<HOSTS>]}
      * - HOSTNAME = string
      * - PORT = integer
      *

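One concrete instance of that URI grammar (hostnames are illustrative):

    public class KmsUriExample {
      // PROTO = https, HOSTS = kms01.example.com;kms02.example.com,
      // PORT = 9600, PATH = kms
      static final String KMS_URI =
          "kms://https@kms01.example.com;kms02.example.com:9600/kms";
    }
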
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
index 1ddd8a3..b2ae084 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
@@ -344,7 +344,7 @@ public class ValueQueue <E> {
    * <code>SyncGenerationPolicy</code> specified by the user.
    * @param keyName String key name
    * @param num Minimum number of values to return.
-   * @return List<E> values returned
+   * @return {@literal List<E>} values returned
    * @throws IOException
    * @throws ExecutionException
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
index 1219bf9..1863f5e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
@@ -30,16 +30,16 @@ import org.slf4j.LoggerFactory;
 /**
  * OpenSSL secure random using JNI.
  * This implementation is thread-safe.
- * <p/>
+ * <p>
  * 
  * If using an Intel chipset with RDRAND, the high-performance hardware 
  * random number generator will be used and it's much faster than
  * {@link java.security.SecureRandom}. If RDRAND is unavailable, default
  * OpenSSL secure random generator will be used. It's still faster
  * and can generate strong random bytes.
- * <p/>
- * @see https://wiki.openssl.org/index.php/Random_Numbers
- * @see http://en.wikipedia.org/wiki/RdRand
+ * <p>
+ * See https://wiki.openssl.org/index.php/Random_Numbers
+ * See http://en.wikipedia.org/wiki/RdRand
  */
 @InterfaceAudience.Private
 public class OpensslSecureRandom extends Random {
@@ -97,7 +97,7 @@ public class OpensslSecureRandom extends Random {
    * random bits (right justified, with leading zeros).
    *
    * @param numBits number of random bits to be generated, where
-   * 0 <= <code>numBits</code> <= 32.
+   * 0 {@literal <=} <code>numBits</code> {@literal <=} 32.
    *
    * @return int an <code>int</code> containing the user-specified number
    * of random bits (right justified, with leading zeros).

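Because OpensslSecureRandom extends java.util.Random, next(numBits) backs the usual public calls; a sketch, assuming the native OpenSSL bindings are loaded:

    import java.util.Random;
    import org.apache.hadoop.crypto.random.OpensslSecureRandom;

    public class SecureRandomSketch {
      public static void main(String[] args) {
        Random rng = new OpensslSecureRandom();
        byte[] key = new byte[16];
        rng.nextBytes(key);    // strong random bytes via RDRAND or OpenSSL
        int i = rng.nextInt(); // built from next(32)
      }
    }
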
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index cf484ca..721f009 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -336,7 +336,7 @@ public abstract class AbstractFileSystem {
    * The default port of this file system.
    * 
    * @return default port of this file system's Uri scheme
-   *         A uri with a port of -1 => default port;
+   *         A uri with a port of -1 =&gt; default port;
    */
   public abstract int getUriDefaultPort();
 
@@ -478,9 +478,11 @@ public abstract class AbstractFileSystem {
    * through any internal symlinks or mount point
    * @param p path to be resolved
    * @return fully qualified path 
-   * @throws FileNotFoundException, AccessControlException, IOException
-   *         UnresolvedLinkException if symbolic link on path cannot be resolved
-   *          internally
+   * @throws FileNotFoundException
+   * @throws AccessControlException
+   * @throws IOException
+   * @throws UnresolvedLinkException if symbolic link on path cannot be
+   * resolved internally
    */
    public Path resolvePath(final Path p) throws FileNotFoundException,
            UnresolvedLinkException, AccessControlException, IOException {
@@ -1021,7 +1023,7 @@ public abstract class AbstractFileSystem {
    * changes.  (Modifications are merged into the current ACL.)
    *
    * @param path Path to modify
-   * @param aclSpec List<AclEntry> describing modifications
+   * @param aclSpec List{@literal <AclEntry>} describing modifications
    * @throws IOException if an ACL could not be modified
    */
   public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
@@ -1035,7 +1037,7 @@ public abstract class AbstractFileSystem {
    * retained.
    *
    * @param path Path to modify
-   * @param aclSpec List<AclEntry> describing entries to remove
+   * @param aclSpec List{@literal <AclEntry>} describing entries to remove
    * @throws IOException if an ACL could not be modified
    */
   public void removeAclEntries(Path path, List<AclEntry> aclSpec)
@@ -1075,8 +1077,9 @@ public abstract class AbstractFileSystem {
    * entries.
    *
    * @param path Path to modify
-   * @param aclSpec List<AclEntry> describing modifications, must include entries
-   *   for user, group, and others for compatibility with permission bits.
+   * @param aclSpec List{@literal <AclEntry>} describing modifications, must
+   * include entries for user, group, and others for compatibility with
+   * permission bits.
    * @throws IOException if an ACL could not be modified
    */
   public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
@@ -1088,7 +1091,7 @@ public abstract class AbstractFileSystem {
    * Gets the ACLs of files and directories.
    *
    * @param path Path to get
-   * @return RemoteIterator<AclStatus> which returns each AclStatus
+   * @return RemoteIterator{@literal <AclStatus>} which returns each AclStatus
    * @throws IOException if an ACL could not be read
    */
   public AclStatus getAclStatus(Path path) throws IOException {
@@ -1100,7 +1103,7 @@ public abstract class AbstractFileSystem {
    * Set an xattr of a file or directory.
    * The name must be prefixed with the namespace followed by ".". For example,
    * "user.attr".
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to modify
@@ -1118,7 +1121,7 @@ public abstract class AbstractFileSystem {
    * Set an xattr of a file or directory.
    * The name must be prefixed with the namespace followed by ".". For example,
    * "user.attr".
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to modify
@@ -1137,7 +1140,7 @@ public abstract class AbstractFileSystem {
    * Get an xattr for a file or directory.
    * The name must be prefixed with the namespace followed by ".". For example,
    * "user.attr".
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attribute
@@ -1154,11 +1157,13 @@ public abstract class AbstractFileSystem {
    * Get all of the xattrs for a file or directory.
    * Only those xattrs for which the logged-in user has permissions to view
    * are returned.
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
-   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   *
+   * @return {@literal Map<String, byte[]>} describing the XAttrs of the file
+   * or directory
    * @throws IOException
    */
   public Map<String, byte[]> getXAttrs(Path path) throws IOException {
@@ -1170,12 +1175,13 @@ public abstract class AbstractFileSystem {
    * Get all of the xattrs for a file or directory.
    * Only those xattrs for which the logged-in user has permissions to view
    * are returned.
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
    * @param names XAttr names.
-   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @return {@literal Map<String, byte[]>} describing the XAttrs of the file
+   * or directory
    * @throws IOException
    */
   public Map<String, byte[]> getXAttrs(Path path, List<String> names)
@@ -1188,11 +1194,12 @@ public abstract class AbstractFileSystem {
    * Get all of the xattr names for a file or directory.
    * Only the xattr names for which the logged-in user has permissions to view
    * are returned.
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
-   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @return {@literal Map<String, byte[]>} describing the XAttrs of the file
+   * or directory
    * @throws IOException
    */
   public List<String> listXAttrs(Path path)
@@ -1205,7 +1212,7 @@ public abstract class AbstractFileSystem {
    * Remove an xattr of a file or directory.
    * The name must be prefixed with the namespace followed by ".". For example,
    * "user.attr".
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to remove extended attribute

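The namespace-prefix rule above in practice, sketched through FileContext, which dispatches to AbstractFileSystem (path and attribute name are illustrative):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class XAttrSketch {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext();
        Path p = new Path("/user/owen/file");
        // The name must carry its namespace prefix, e.g. "user.".
        fc.setXAttr(p, "user.attr", "value".getBytes(StandardCharsets.UTF_8));
        byte[] v = fc.getXAttr(p, "user.attr");
      }
    }
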
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
index 2eb8b95..973b136 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 
 
 /**
- * A class that optimizes reading from FSInputStream by buffering
+ * A class that optimizes reading from FSInputStream by buffering.
  */
 
 @InterfaceAudience.Private
@@ -44,7 +44,7 @@ implements Seekable, PositionedReadable, HasFileDescriptor {
    *
    * @param   in     the underlying input stream.
    * @param   size   the buffer size.
-   * @exception IllegalArgumentException if size <= 0.
+   * @exception IllegalArgumentException if size {@literal <=} 0.
    */
   public BufferedFSInputStream(FSInputStream in, int size) {
     super(in, size);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java
index 20f7224..926b554 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java
@@ -32,18 +32,18 @@ public interface ByteBufferReadable {
   /**
    * Reads up to buf.remaining() bytes into buf. Callers should use
    * buf.limit(..) to control the size of the desired read.
-   * <p/>
+   * <p>
    * After a successful call, buf.position() will be advanced by the number 
    * of bytes read and buf.limit() should be unchanged.
-   * <p/>
+   * <p>
    * In the case of an exception, the values of buf.position() and buf.limit()
    * are undefined, and callers should be prepared to recover from this
    * eventuality.
-   * <p/>
+   * <p>
    * Many implementations will throw {@link UnsupportedOperationException}, so
    * callers that are not confident in support for this method from the
    * underlying filesystem should be prepared to handle that exception.
-   * <p/>
+   * <p>
    * Implementations should treat 0-length requests as legitimate, and must not
    * signal an error upon their receipt.
    *

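Since implementations may throw UnsupportedOperationException, callers usually keep a byte[] fallback; a sketch against FSDataInputStream, which implements this interface:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.fs.FSDataInputStream;

    public class ByteBufferReadSketch {
      static int readSome(FSDataInputStream in) throws IOException {
        ByteBuffer buf = ByteBuffer.allocate(4096);
        try {
          return in.read(buf);  // advances buf.position() by the bytes read
        } catch (UnsupportedOperationException e) {
          byte[] fallback = new byte[4096];
          return in.read(fallback, 0, fallback.length);
        }
      }
    }
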
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 663c910..3db3173 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.util.Progressable;
  * Abstract Checksumed FileSystem.
  * It provide a basic implementation of a Checksumed FileSystem,
  * which creates a checksum file for each raw file.
- * It generates & verifies checksums at the client side.
+ * It generates &amp; verifies checksums at the client side.
  *
  *****************************************************************/
 @InterfaceAudience.Public

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index c56f6e0..663b05d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -42,7 +42,7 @@ import org.slf4j.LoggerFactory;
  * Abstract Checksumed Fs.
  * It provide a basic implementation of a Checksumed Fs,
  * which creates a checksum file for each raw file.
- * It generates & verifies checksums at the client side.
+ * It generates &amp; verifies checksums at the client side.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 1eb27f8..72e5309 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -309,7 +309,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
     "dr.who";
 
   /**
-   * User->groups static mapping to override the groups lookup
+   * User{@literal ->}groups static mapping to override the groups lookup
    */
   public static final String HADOOP_USER_GROUP_STATIC_OVERRIDES = 
       "hadoop.user.group.static.mapping.overrides";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
index c3e088b..58b5f70 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  * CreateFlag specifies the file create semantic. Users can combine flags like: <br>
  * <code>
  * EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND)
- * <code>
+ * </code>
  * <p>
  * 
  * Use the CreateFlag as follows:

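The unclosed <code> tag above was hiding the documented idiom; for
reference, a runnable sketch of combining flags (the class name and path
are illustrative only):

import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class CreateFlagExample {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext();
    // CREATE + APPEND: create the file if absent, otherwise append to it.
    try (FSDataOutputStream out = fc.create(new Path("/tmp/example.txt"),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND))) {
      out.writeUTF("hello");
    }
  }
}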
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
index 08d71f1..62c45f1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs;
 
 import java.io.DataInputStream;
+import java.io.EOFException;
 import java.io.FileDescriptor;
 import java.io.FileInputStream;
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
index 4f06e26..de66eab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
@@ -101,7 +101,7 @@ abstract public class FSInputChecker extends FSInputStream {
    *     Implementors should simply pass through to the underlying data stream.
    * or
    *  (b) needChecksum() will return true:
-   *    - len >= maxChunkSize
+   *    - len {@literal >=} maxChunkSize
    *    - checksum.length is a multiple of CHECKSUM_SIZE
    *    Implementors should read an integer number of data chunks into
    *    buf. The amount read should be bounded by len or by 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 0b3889b..084f8f1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -765,7 +765,7 @@ public class FileContext {
    * Make(create) a directory and all the non-existent parents.
    * 
    * @param dir - the dir to make
-   * @param permission - permissions is set permission&~umask
+   * @param permission - permissions is set permission{@literal &~}umask
   * @param createParent - if true then missing parent dirs are created if false
    *          then parent must exist
    * 
@@ -979,7 +979,6 @@ public class FileContext {
   /**
    * Renames Path src to Path dst
    * <ul>
-   * <li
    * <li>Fails if src is a file and dst is a directory.
    * <li>Fails if src is a directory and dst is a file.
    * <li>Fails if the parent of dst does not exist or is a file.
@@ -1001,7 +1000,7 @@ public class FileContext {
    * 
    * @throws AccessControlException If access is denied
    * @throws FileAlreadyExistsException If <code>dst</code> already exists and
-   *           <code>options</options> has {@link Options.Rename#OVERWRITE} 
+   *           <code>options</code> has {@link Options.Rename#OVERWRITE}
    *           option false.
    * @throws FileNotFoundException If <code>src</code> does not exist
    * @throws ParentNotDirectoryException If parent of <code>dst</code> is not a
@@ -1250,7 +1249,7 @@ public class FileContext {
    * checks to perform.  If the requested permissions are granted, then the
    * method returns normally.  If access is denied, then the method throws an
    * {@link AccessControlException}.
-   * <p/>
+   * <p>
   * The default implementation of this method calls {@link #getFileStatus(Path)}
    * and checks the returned permissions against the requested permissions.
    * Note that the getFileStatus call will be subject to authorization checks.
@@ -1497,9 +1496,9 @@ public class FileContext {
    * <pre>
    * Given a path referring to a symlink of form:
    * 
-   *   <---X---> 
+   *   {@literal <---}X{@literal --->}
    *   fs://host/A/B/link 
-   *   <-----Y----->
+   *   {@literal <-----}Y{@literal ----->}
    * 
    * In this path X is the scheme and authority that identify the file system,
    * and Y is the path leading up to the final path component "link". If Y is
@@ -1536,7 +1535,7 @@ public class FileContext {
    *
    *
    * @throws AccessControlException If access is denied
-   * @throws FileAlreadyExistsException If file <code>linkcode> already exists
+   * @throws FileAlreadyExistsException If file <code>link</code> already exists
    * @throws FileNotFoundException If <code>target</code> does not exist
   * @throws ParentNotDirectoryException If parent of <code>link</code> is not a
    *           directory.
@@ -2038,7 +2037,6 @@ public class FileContext {
      * <dl>
      *  <dd>
      *   <dl>
-     *    <p>
      *    <dt> <tt> ? </tt>
      *    <dd> Matches any single character.
      *
@@ -2400,7 +2398,8 @@ public class FileContext {
    * changes.  (Modifications are merged into the current ACL.)
    *
    * @param path Path to modify
-   * @param aclSpec List<AclEntry> describing modifications
+   * @param aclSpec List{@literal <}AclEntry{@literal >} describing
+   * modifications
    * @throws IOException if an ACL could not be modified
    */
   public void modifyAclEntries(final Path path, final List<AclEntry> aclSpec)
@@ -2421,7 +2420,8 @@ public class FileContext {
    * retained.
    *
    * @param path Path to modify
-   * @param aclSpec List<AclEntry> describing entries to remove
+   * @param aclSpec List{@literal <}AclEntry{@literal >} describing entries
+   * to remove
    * @throws IOException if an ACL could not be modified
    */
   public void removeAclEntries(final Path path, final List<AclEntry> aclSpec)
@@ -2481,8 +2481,9 @@ public class FileContext {
    * entries.
    *
    * @param path Path to modify
-   * @param aclSpec List<AclEntry> describing modifications, must include entries
-   *   for user, group, and others for compatibility with permission bits.
+   * @param aclSpec List{@literal <}AclEntry{@literal >} describing
+   * modifications, must include entries for user, group, and others for
+   * compatibility with permission bits.
    * @throws IOException if an ACL could not be modified
    */
   public void setAcl(Path path, final List<AclEntry> aclSpec)
@@ -2502,7 +2503,8 @@ public class FileContext {
    * Gets the ACLs of files and directories.
    *
    * @param path Path to get
-   * @return RemoteIterator<AclStatus> which returns each AclStatus
+   * @return RemoteIterator{@literal <}AclStatus{@literal >} which returns
+   *         each AclStatus
    * @throws IOException if an ACL could not be read
    */
   public AclStatus getAclStatus(Path path) throws IOException {
@@ -2520,7 +2522,7 @@ public class FileContext {
    * Set an xattr of a file or directory.
    * The name must be prefixed with the namespace followed by ".". For example,
    * "user.attr".
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to modify
@@ -2538,7 +2540,7 @@ public class FileContext {
    * Set an xattr of a file or directory.
    * The name must be prefixed with the namespace followed by ".". For example,
    * "user.attr".
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to modify
@@ -2564,7 +2566,7 @@ public class FileContext {
    * Get an xattr for a file or directory.
    * The name must be prefixed with the namespace followed by ".". For example,
    * "user.attr".
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attribute
@@ -2587,11 +2589,12 @@ public class FileContext {
    * Get all of the xattrs for a file or directory.
    * Only those xattrs for which the logged-in user has permissions to view
    * are returned.
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
-   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @return Map{@literal <}String, byte[]{@literal >} describing the XAttrs
+   * of the file or directory
    * @throws IOException
    */
   public Map<String, byte[]> getXAttrs(Path path) throws IOException {
@@ -2609,12 +2612,13 @@ public class FileContext {
    * Get all of the xattrs for a file or directory.
    * Only those xattrs for which the logged-in user has permissions to view
    * are returned.
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
    * @param names XAttr names.
-   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @return Map{@literal <}String, byte[]{@literal >} describing the XAttrs
+   * of the file or directory
    * @throws IOException
    */
   public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
@@ -2633,7 +2637,7 @@ public class FileContext {
    * Remove an xattr of a file or directory.
    * The name must be prefixed with the namespace followed by ".". For example,
    * "user.attr".
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to remove extended attribute
@@ -2656,11 +2660,12 @@ public class FileContext {
    * Get all of the xattr names for a file or directory.
    * Only those xattr names which the logged-in user has permissions to view
    * are returned.
-   * <p/>
+   * <p>
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
-   * @return List<String> of the XAttr names of the file or directory
+   * @return List{@literal <}String{@literal >} of the XAttr names of the
+   * file or directory
    * @throws IOException
    */
   public List<String> listXAttrs(Path path) throws IOException {

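The ACL and xattr methods whose signatures are escaped above are typically
used together; a sketch of the calls (the path, spec and values are
illustrative, and AclEntry.parseAclSpec is assumed as the spec parser):

import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;

public class FileContextAclXAttrExample {
  static void demo() throws Exception {
    FileContext fc = FileContext.getFileContext();
    Path p = new Path("/tmp/data");

    // ACLs: parse a spec, merge it into the current ACL, read it back.
    List<AclEntry> spec = AclEntry.parseAclSpec("user:alice:rw-", true);
    fc.modifyAclEntries(p, spec);
    AclStatus status = fc.getAclStatus(p);

    // xattrs: names must carry a namespace prefix such as "user.".
    fc.setXAttr(p, "user.origin", "ingest-job".getBytes(StandardCharsets.UTF_8));
    Map<String, byte[]> attrs = fc.getXAttrs(p);
    List<String> names = fc.listXAttrs(p);
  }
}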
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index c309941..66b6d44 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -684,7 +684,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Create a file with the provided permission.
    *
    * The permission of the file is set to be the provided permission as in
-   * setPermission, not permission&~umask
+   * setPermission, not permission{@literal &~}umask
    *
    * The HDFS implementation is implemented using two RPCs.
    * It is understood that it is inefficient,
@@ -709,7 +709,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   /**
    * Create a directory with the provided permission.
    * The permission of the directory is set to be the provided permission as in
-   * setPermission, not permission&~umask
+   * setPermission, not permission{@literal &~}umask
    *
    * @see #create(FileSystem, Path, FsPermission)
    *
@@ -789,7 +789,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * <pre>
    *   if f == null :
    *     result = null
-   *   elif f.getLen() <= start:
+   *   elif f.getLen() {@literal <=} start:
    *     result = []
    *   else result = [ locations(FS, b) for b in blocks(FS, p, s, s+l)]
    * </pre>
@@ -2017,7 +2017,6 @@ public abstract class FileSystem extends Configured implements Closeable {
    * <dl>
    *  <dd>
    *   <dl>
-   *    <p>
    *    <dt> <tt> ? </tt>
    *    <dd> Matches any single character.
    *
@@ -2916,7 +2915,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * changes.  (Modifications are merged into the current ACL.)
    *
    * @param path Path to modify
-   * @param aclSpec List<AclEntry> describing modifications
+   * @param aclSpec List&lt;AclEntry&gt; describing modifications
    * @throws IOException if an ACL could not be modified
    * @throws UnsupportedOperationException if the operation is unsupported
    *         (default outcome).
@@ -3109,7 +3108,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
-   * @return List<String> of the XAttr names of the file or directory
+   * @return List{@literal <String>} of the XAttr names of the file or directory
    * @throws IOException IO failure
    * @throws UnsupportedOperationException if the operation is unsupported
    *         (default outcome).

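The <pre> pseudo-code above (null status yields null, start beyond EOF
yields an empty array) corresponds to calls like the following sketch
(class name and path are illustrative):

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationExample {
  static void locate(FileSystem fs, Path p) throws Exception {
    FileStatus st = fs.getFileStatus(p);
    // Whole-file query: every block overlapping [0, len) is returned.
    BlockLocation[] locs = fs.getFileBlockLocations(st, 0, st.getLen());
    for (BlockLocation loc : locs) {
      System.out.println(loc.getOffset() + " -> "
          + String.join(",", loc.getHosts()));
    }
  }
}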
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index df89598..5f01d43 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -1474,8 +1474,8 @@ public class FileUtil {
   * @param inputClassPath String input classpath to bundle into the jar manifest
    * @param pwd Path to working directory to save jar
    * @param targetDir path to where the jar execution will have its working dir
-   * @param callerEnv Map<String, String> caller's environment variables to use
-   *   for expansion
+   * @param callerEnv Map {@literal <}String, String{@literal >} caller's
+   * environment variables to use for expansion
    * @return String[] with absolute path to new jar in position 0 and
    *   unexpanded wild card entry path in position 1
    * @throws IOException if there is an I/O error while writing the jar file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
index f6ffcb4..f7da819 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
@@ -83,7 +83,7 @@ public class HarFileSystem extends FileSystem {
 
   /**
    * Return the protocol scheme for the FileSystem.
-   * <p/>
+   * <p>
    *
    * @return <code>har</code>
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasEnhancedByteBufferAccess.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasEnhancedByteBufferAccess.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasEnhancedByteBufferAccess.java
index 982a0ef..8ceba7b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasEnhancedByteBufferAccess.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasEnhancedByteBufferAccess.java
@@ -52,18 +52,19 @@ public interface HasEnhancedByteBufferAccess {
    * @return
    *            We will always return an empty buffer if maxLength was 0,
    *            whether or not we are at EOF.
-   *            If maxLength > 0, we will return null if the stream has
-   *            reached EOF.
+   *            If maxLength &gt; 0, we will return null if the stream
+   *            has reached EOF.
    *            Otherwise, we will return a ByteBuffer containing at least one 
    *            byte.  You must free this ByteBuffer when you are done with it 
    *            by calling releaseBuffer on it.  The buffer will continue to be
    *            readable until it is released in this manner.  However, the
    *            input stream's close method may warn about unclosed buffers.
-   * @throws
-   *            IOException: if there was an error reading.
-   *            UnsupportedOperationException: if factory was null, and we
-   *            needed an external byte buffer.  UnsupportedOperationException
-   *            will never be thrown unless the factory argument is null.
+   * @throws    IOException if there was an error reading.
+   * @throws    UnsupportedOperationException  if factory was null,
+   *             and we needed an external byte buffer.
+   * @throws    UnsupportedOperationException  will never be thrown
+   *             unless the factory argument is null.
+   *
    */
   public ByteBuffer read(ByteBufferPool factory, int maxLength,
       EnumSet<ReadOption> opts)

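With the @throws clauses untangled, the calling contract reads: a null
return means EOF (when maxLength > 0), and every non-null buffer must be
handed back via releaseBuffer. A sketch against FSDataInputStream, which
implements this interface (the buffer size and helper name are
illustrative):

import java.nio.ByteBuffer;
import java.util.EnumSet;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class EnhancedReadExample {
  static void readAll(FSDataInputStream in) throws Exception {
    ByteBufferPool pool = new ElasticByteBufferPool();
    ByteBuffer buf;
    // read may throw UnsupportedOperationException only when an external
    // buffer is needed and the factory is null; here a pool is supplied.
    while ((buf = in.read(pool, 1 << 20, EnumSet.noneOf(ReadOption.class)))
        != null) {
      try {
        // consume buf.remaining() bytes here
      } finally {
        in.releaseBuffer(buf); // required by the contract above
      }
    }
  }
}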
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index a4b158a..5f266a7b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -241,9 +241,8 @@ public class LocalDirAllocator {
    *  @param pathStr the requested file (this will be searched)
    *  @param conf the Configuration object
    *  @return true if files exist. false otherwise
-   *  @throws IOException
    */
-  public boolean ifExists(String pathStr,Configuration conf) {
+  public boolean ifExists(String pathStr, Configuration conf) {
     AllocatorPerContext context = obtainContext(contextCfgItemName);
     return context.ifExists(pathStr, conf);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
index 538ccdf..c41190a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
@@ -54,7 +54,7 @@ public class LocalFileSystem extends ChecksumFileSystem {
 
   /**
    * Return the protocol scheme for the FileSystem.
-   * <p/>
+   * <p>
    *
    * @return <code>file</code>
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
index 5e93286..75bc12d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
@@ -290,7 +290,7 @@ public final class Options {
      * @param defaultOpt Default checksum option
      * @param userOpt User-specified checksum option. Ignored if null.
      * @param userBytesPerChecksum User-specified bytesPerChecksum
-     *                Ignored if < 0.
+     *                Ignored if {@literal <} 0.
      */
     public static ChecksumOpt processChecksumOpt(ChecksumOpt defaultOpt, 
         ChecksumOpt userOpt, int userBytesPerChecksum) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
index 9ad6a28..3472362 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
@@ -229,8 +229,8 @@ public class QuotaUsage {
 
   /**
    * Output format:
-   * <----12----> <----15----> <----15----> <----15----> <-------18------->
-   *    QUOTA   REMAINING_QUATA SPACE_QUOTA SPACE_QUOTA_REM FILE_NAME
+   * |----12----| |----15----| |----15----| |----15----| |-------18-------|
+   *    QUOTA   REMAINING_QUOTA SPACE_QUOTA SPACE_QUOTA_REM FILE_NAME
    */
   protected static final String QUOTA_STRING_FORMAT = "%12s %15s ";
   protected static final String SPACE_QUOTA_STRING_FORMAT = "%15s %15s ";
@@ -244,9 +244,9 @@ public class QuotaUsage {
 
   /**
    * Output format:
-   * <----12----> <------15-----> <------15-----> <------15----->
+   * |----12----| |------15-----| |------15-----| |------15-----|
    *        QUOTA       REM_QUOTA     SPACE_QUOTA REM_SPACE_QUOTA
-   * <----12----> <----12----> <-------18------->
+   * |----12----| |----12----| |-------18-------|
    *    DIR_COUNT   FILE_COUNT       CONTENT_SIZE
    */
   private static final String STORAGE_TYPE_SUMMARY_FORMAT = "%13s %17s ";

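The corrected column rulers describe the widths hard-coded in the format
strings shown above; a tiny demonstration with illustrative values:

public class QuotaFormatDemo {
  public static void main(String[] args) {
    // "%12s %15s " = one 12-wide and one 15-wide right-aligned column,
    // matching the |----12----| |----15----| ruler above.
    System.out.printf("%12s %15s %n", "QUOTA", "REM_QUOTA");
    System.out.printf("%12s %15s %n", "none", "inf");
  }
}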
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 644cf4e..676c207 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -76,7 +76,7 @@ public class FTPFileSystem extends FileSystem {
 
   /**
    * Return the protocol scheme for the FileSystem.
-   * <p/>
+   * <p>
    *
    * @return <code>ftp</code>
    */
@@ -162,7 +162,7 @@ public class FTPFileSystem extends FileSystem {
   /**
    * Set FTP's transfer mode based on configuration. Valid values are
    * STREAM_TRANSFER_MODE, BLOCK_TRANSFER_MODE and COMPRESSED_TRANSFER_MODE.
-   * <p/>
+   * <p>
    * Defaults to BLOCK_TRANSFER_MODE.
    *
    * @param conf
@@ -195,7 +195,7 @@ public class FTPFileSystem extends FileSystem {
    * Set the FTPClient's data connection mode based on configuration. Valid
    * values are ACTIVE_LOCAL_DATA_CONNECTION_MODE,
   * PASSIVE_LOCAL_DATA_CONNECTION_MODE and PASSIVE_REMOTE_DATA_CONNECTION_MODE.
-   * <p/>
+   * <p>
    * Defaults to ACTIVE_LOCAL_DATA_CONNECTION_MODE.
    *
    * @param client

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FtpConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FtpConfigKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FtpConfigKeys.java
index e59efa5..b522102 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FtpConfigKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FtpConfigKeys.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.fs.ftp;
 import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ChecksumFileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.util.DataChecksum;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/LocalConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/LocalConfigKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/LocalConfigKeys.java
index 0b9e7455..e93858f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/LocalConfigKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/LocalConfigKeys.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ChecksumFileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.util.DataChecksum;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
index 131aa19..385fed2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
@@ -69,7 +69,7 @@ public class AclStatus {
   /**
    * Returns the list of all ACL entries, ordered by their natural ordering.
    *
-   * @return List<AclEntry> unmodifiable ordered list of all ACL entries
+   * @return List&lt;AclEntry&gt; unmodifiable ordered list of all ACL entries
    */
   public List<AclEntry> getEntries() {
     return entries;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b57f2f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java
index 2811a89..4249252 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java
@@ -36,8 +36,8 @@ public final class AclUtil {
    * Given permissions and extended ACL entries, returns the full logical ACL.
    *
    * @param perm FsPermission containing permissions
-   * @param entries List<AclEntry> containing extended ACL entries
-   * @return List<AclEntry> containing full logical ACL
+   * @param entries List&lt;AclEntry&gt; containing extended ACL entries
+   * @return List&lt;AclEntry&gt; containing full logical ACL
    */
   public static List<AclEntry> getAclFromPermAndEntries(FsPermission perm,
       List<AclEntry> entries) {
@@ -93,8 +93,8 @@ public final class AclUtil {
    * Translates the given permission bits to the equivalent minimal ACL.
    *
    * @param perm FsPermission to translate
-   * @return List<AclEntry> containing exactly 3 entries representing the owner,
-   *   group and other permissions
+   * @return List&lt;AclEntry&gt; containing exactly 3 entries representing the
+   *         owner, group and other permissions
    */
   public static List<AclEntry> getMinimalAcl(FsPermission perm) {
     return Lists.newArrayList(
@@ -119,7 +119,7 @@ public final class AclUtil {
    * Checks if the given entries represent a minimal ACL (contains exactly 3
    * entries).
    *
-   * @param entries List<AclEntry> entries to check
+   * @param entries List&lt;AclEntry&gt; entries to check
    * @return boolean true if the entries represent a minimal ACL
    */
   public static boolean isMinimalAcl(List<AclEntry> entries) {

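A quick sketch of the minimal-ACL round trip documented above (the
permission value and class name are illustrative; note AclUtil is an
audience-private utility):

import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class MinimalAclDemo {
  public static void main(String[] args) {
    // rwxr-x--- expands to exactly three entries: owner, group, other.
    FsPermission perm =
        new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE);
    List<AclEntry> minimal = AclUtil.getMinimalAcl(perm);
    System.out.println(AclUtil.isMinimalAcl(minimal)); // prints true
  }
}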
