This is an automated email from the ASF dual-hosted git repository.

pvary pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 431e7d9e54 HIVE-26092: Fix javadoc errors for the 4.0.0 release (Peter Vary reviewed by Zoltan Haindrich) (#3185)
431e7d9e54 is described below

commit 431e7d9e5431a808106d8db81e11aea74f040da5
Author: pvary <pv...@cloudera.com>
AuthorDate: Tue Apr 12 13:52:52 2022 +0200

    HIVE-26092: Fix javadoc errors for the 4.0.0 release (Peter Vary reviewed by Zoltan Haindrich) (#3185)
---
 Jenkinsfile                                        | 12 ++++++++
 .../format/datetime/HiveSqlDateTimeFormatter.java  | 32 +++++++++++-----------
 .../org/apache/hadoop/hive/common/type/Date.java   |  6 ++--
 .../apache/hadoop/hive/common/type/Timestamp.java  |  6 ++--
 .../apache/hive/common/util/TimestampParser.java   |  2 +-
 .../hive/benchmark/calcite/FieldTrimmerBench.java  |  6 ++--
 .../apache/hive/benchmark/hash/Murmur3Bench.java   | 10 +++----
 .../hive/benchmark/serde/LazySimpleSerDeBench.java |  6 ++--
 .../vectorization/VectorizedArithmeticBench.java   | 10 +++----
 .../vectorization/VectorizedComparisonBench.java   | 10 +++----
 .../vectorization/VectorizedLikeBench.java         | 10 +++----
 .../vectorization/VectorizedLogicBench.java        | 10 +++----
 .../hive/ql/qoption/QTestOptionDispatcher.java     |  2 +-
 .../org/apache/hive/jdbc/HiveBaseResultSet.java    |  4 +--
 .../apache/hive/jdbc/saml/IJdbcBrowserClient.java  |  2 +-
 .../org/apache/hadoop/hive/llap/io/api/LlapIo.java |  2 +-
 .../security/DefaultJwtSharedSecretProvider.java   |  2 +-
 .../tezplugins/metrics/LlapMetricsListener.java    |  2 +-
 .../org/apache/hadoop/hive/llap/LlapHiveUtils.java |  3 +-
 .../ql/ddl/table/info/desc/DescTableAnalyzer.java  |  6 ++--
 .../hadoop/hive/ql/exec/AddToClassPathAction.java  |  4 +--
 .../java/org/apache/hadoop/hive/ql/exec/Task.java  |  2 +-
 .../org/apache/hadoop/hive/ql/exec/Utilities.java  |  3 +-
 .../hive/ql/exec/WindowFunctionDescription.java    | 26 +++++++++---------
 .../hive/ql/exec/repl/OptimisedBootstrapUtils.java |  4 +--
 .../hadoop/hive/ql/exec/repl/ReplStatsTracker.java |  2 +-
 .../apache/hadoop/hive/ql/exec/tez/DagUtils.java   |  1 -
 .../expressions/CastDateToCharWithFormat.java      |  2 +-
 .../expressions/CastDateToStringWithFormat.java    |  2 +-
 .../expressions/CastDateToVarCharWithFormat.java   |  2 +-
 .../expressions/CastStringToDateWithFormat.java    |  2 +-
 .../CastStringToTimestampWithFormat.java           |  2 +-
 .../expressions/CastTimestampToCharWithFormat.java |  2 +-
 .../CastTimestampToStringWithFormat.java           |  2 +-
 .../CastTimestampToVarCharWithFormat.java          |  2 +-
 .../apache/hadoop/hive/ql/io/AcidInputFormat.java  |  2 +-
 .../hadoop/hive/ql/io/orc/encoded/StreamUtils.java |  1 -
 .../hive/ql/log/syslog/SyslogInputFormat.java      |  2 +-
 .../hadoop/hive/ql/log/syslog/SyslogParser.java    | 11 ++++----
 .../org/apache/hadoop/hive/ql/metadata/Hive.java   |  2 +-
 .../hive/ql/metadata/HiveStorageHandler.java       |  2 +-
 .../hive/ql/optimizer/ParallelEdgeFixer.java       |  2 +-
 .../hive/ql/optimizer/SemiJoinReductionMerge.java  |  6 ++--
 .../calcite/functions/HiveMergeableAggregate.java  |  2 +-
 .../calcite/rules/HiveAggregateSortLimitRule.java  |  2 +-
 .../ql/optimizer/calcite/rules/HiveDruidRules.java |  2 +-
 .../calcite/rules/HiveHepExtractRelNodeRule.java   |  3 +-
 .../HiveProjectSortExchangeTransposeRule.java      |  2 +-
 .../rules/HiveRewriteToDataSketchesRules.java      |  6 ++--
 ...regateInsertDeleteIncrementalRewritingRule.java | 14 +++++-----
 ...iveAggregateInsertIncrementalRewritingRule.java |  6 ++--
 ...AggregatePartitionIncrementalRewritingRule.java |  4 +--
 ...veJoinInsertDeleteIncrementalRewritingRule.java |  4 +--
 .../calcite/translator/RexNodeConverter.java       |  4 +--
 .../hive/ql/optimizer/topnkey/CommonKeyPrefix.java |  6 ++--
 .../ql/optimizer/topnkey/TopNKeyProcessor.java     |  2 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java       |  1 -
 .../hadoop/hive/ql/parse/UnparseTranslator.java    |  4 +--
 .../hadoop/hive/ql/parse/type/FunctionHelper.java  |  2 +-
 .../hive/ql/txn/compactor/CompactorThread.java     |  2 +-
 .../hive/ql/udf/generic/GenericUDAFRank.java       |  2 +-
 .../hive/ql/udf/generic/GenericUDFCastFormat.java  |  2 +-
 .../org/apache/hadoop/hive/serde2/JsonSerDe.java   |  4 +--
 .../hadoop/hive/serde2/json/BinaryEncoding.java    |  2 +-
 .../hadoop/hive/serde2/json/HiveJsonReader.java    |  6 ++--
 .../hive/service/cli/operation/QueryInfoCache.java |  2 +-
 .../org/apache/hadoop/hive/shims/HadoopShims.java  |  2 --
 .../hadoop/hive/metastore/HiveMetaStoreClient.java |  2 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java    | 11 ++------
 .../hadoop/hive/metastore/utils/FileUtils.java     |  1 -
 .../hadoop/hive/metastore/ExceptionHandler.java    |  4 +--
 .../metastore/IMetaStoreMetadataTransformer.java   | 12 ++++----
 .../hive/metastore/MetaStoreEventListener.java     |  2 +-
 .../org/apache/hadoop/hive/metastore/RawStore.java | 15 +++++-----
 .../jdbc/DerbySQLConnectorProvider.java            |  2 +-
 .../apache/hadoop/hive/metastore/txn/TxnStore.java |  2 +-
 76 files changed, 182 insertions(+), 184 deletions(-)
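
Most of the hunks below replace raw '<', '>' and '&' characters in javadoc
comments with HTML entities, turn self-closing <p/> into <p>, and repair
@see/@throws tags; the new Jenkins 'javadoc-check' stage then guards against
regressions. A minimal sketch of why the raw characters fail (hypothetical
classes, not part of this commit; exact doclint messages vary by JDK):

    // Rejected by doclint: the raw markup below is parsed as HTML, producing
    // errors such as "malformed HTML" or "bad use of '>'".
    /** Maps key -> value pairs of type Map<String, String>. */
    class Before { }

    // Accepted: HTML entities, or {@code ...}, which needs no escaping.
    /** Maps key -&gt; value pairs of type {@code Map<String, String>}. */
    class After { }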

diff --git a/Jenkinsfile b/Jenkinsfile
index 8d16e60e9a..ade004a6c3 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -350,6 +350,18 @@ tar -xzf packaging/target/apache-hive-*-nightly-*-src.tar.gz
       }
     }
   }
+  branches['javadoc-check'] = {
+    executorNode {
+      stage('Prepare') {
+          loadWS();
+      }
+      stage('Generate javadoc') {
+          sh """#!/bin/bash -e
+mvn install javadoc:javadoc javadoc:aggregate -DskipTests -pl '!itests/hive-jmh,!itests/util'
+"""
+      }
+    }
+  }
   try {
     stage('Testing') {
       parallel branches
diff --git a/common/src/java/org/apache/hadoop/hive/common/format/datetime/HiveSqlDateTimeFormatter.java b/common/src/java/org/apache/hadoop/hive/common/format/datetime/HiveSqlDateTimeFormatter.java
index 2b79e03c6f..00d4796f7b 100644
--- a/common/src/java/org/apache/hadoop/hive/common/format/datetime/HiveSqlDateTimeFormatter.java
+++ b/common/src/java/org/apache/hadoop/hive/common/format/datetime/HiveSqlDateTimeFormatter.java
@@ -176,7 +176,7 @@ import java.util.stream.Collectors;
 *   - Retains the exact format (capitalization and length) provided in the pattern string. If p.m.
 *     is in the pattern, we expect a.m. or p.m. in the output; if AM is in the pattern, we expect
 *     AM or PM in the output. If the case is mixed (Am or aM) then the output case will match the
- *     case of the pattern's first character (Am => AM, aM => am).
+ *     case of the pattern's first character (Am =&gt; AM, aM =&gt; am).
  * - String to datetime conversion:
  *   - Conflicts with HH24 and SSSSS.
  *   - It doesn't matter which meridian indicator is in the pattern.
@@ -253,21 +253,21 @@ import java.util.stream.Collectors;
 * - Output is right padded with trailing spaces unless the pattern is marked with the fill mode
  *   modifier (FM). Capitalization happens as follows:
 *   - If the first letter of the pattern is lowercase then the output is lowercase:
- *     'mONTH' -> 'may'
+ *     'mONTH' -&gt; 'may'
 *   - If the first two letters of the pattern are uppercase then the output is uppercase:
- *     'MOnth' -> 'MAY'
+ *     'MOnth' -&gt; 'MAY'
 *   - If the first letter of the pattern is uppercase and the second is lowercase then the output
- *     is capitalized: 'Month' -> 'May'.
+ *     is capitalized: 'Month' -&gt; 'May'.
 * - For string to datetime conversion, the case of the pattern does not matter.
  *
  * MONTH|Month|month
  * Name of month of year
 * - For datetime to string conversion, will include trailing spaces up to length 9 (length of
 *   longest month of year name: "September"). Case is taken into account according to the
- *   following example (pattern => output):
- *   - MONTH => JANUARY
- *   - Month => January
- *   - month => january
+ *   following example (pattern =&gt; output):
+ *   - MONTH =&gt; JANUARY
+ *   - Month =&gt; January
+ *   - month =&gt; january
 * - For string to datetime conversion, neither the case of the pattern nor the case of the input
  *   are taken into account.
  * - For string to datetime conversion, conflicts with MM and MON.
@@ -276,10 +276,10 @@ import java.util.stream.Collectors;
  * MON|Mon|mon
  * Abbreviated name of month of year
 * - For datetime to string conversion, case is taken into account according to the following
- *   example (pattern => output):
- *   - MON => JAN
- *   - Mon => Jan
- *   - mon => jan
+ *   example (pattern =&gt; output):
+ *   - MON =&gt; JAN
+ *   - Mon =&gt; Jan
+ *   - mon =&gt; jan
 * - For string to datetime conversion, neither the case of the pattern nor the case of the input
  *   are taken into account.
  * - For string to datetime conversion, conflicts with MM and MONTH.
@@ -289,7 +289,7 @@ import java.util.stream.Collectors;
  * Name of day of week
 * - For datetime to string conversion, will include trailing spaces until length is 9 (length of
 *   longest day of week name: "Wednesday"). Case is taken into account according to the following
- *   example (pattern => output):
+ *   example (pattern =&gt; output):
  *   - DAY = SUNDAY
  *   - Day = Sunday
  *   - day = sunday
@@ -300,7 +300,7 @@ import java.util.stream.Collectors;
  * DY|Dy|dy
  * Abbreviated name of day of week
 * - For datetime to string conversion, case is taken into account according to the following
- *   example (pattern => output):
+ *   example (pattern =&gt; output):
  *   - DY = SUN
  *   - Dy = Sun
  *   - dy = sun
@@ -329,11 +329,11 @@ import java.util.stream.Collectors;
  *   zone agnostic.
  *
  * C. Separators
- * -|.|/|,|'|;|:|<space>
+ * -|.|/|,|'|;|:|&lt;space&gt;
  * Separator
 * - Uses loose matching. Existence of a sequence of separators in the format should match the
 *   existence of a sequence of separators in the input regardless of the types of the separator or
- *   the length of the sequence where length > 1. E.g. input=“2019-. ;10/10”, pattern=“YYYY-MM-DD”
+ *   the length of the sequence where length &gt; 1. E.g. input=“2019-. ;10/10”, pattern=“YYYY-MM-DD”
  *   is valid; input=“20191010”, pattern=“YYYY-MM-DD” is not valid.
 * - If the last separator character in the separator substring is "-" and is immediately followed
 *   by a time zone hour (tzh) token, it's a negative sign and not counted as a separator, UNLESS
diff --git a/common/src/java/org/apache/hadoop/hive/common/type/Date.java b/common/src/java/org/apache/hadoop/hive/common/type/Date.java
index 0a6de90bdf..13e710c5d8 100644
--- a/common/src/java/org/apache/hadoop/hive/common/type/Date.java
+++ b/common/src/java/org/apache/hadoop/hive/common/type/Date.java
@@ -39,7 +39,7 @@ import static java.time.temporal.ChronoField.YEAR;
 /**
  * This is the internal type for Date. The full qualified input format of Date
  * is "uuuu-MM-dd". For example: "2021-02-11".
- * <table border="2">
+ * <table border="2" summary="">
  * <tr>
  * <th>Field</th>
  * <th>Format</th>
@@ -73,8 +73,8 @@ import static java.time.temporal.ChronoField.YEAR;
  * positive.
  * </p>
  *
- * @see {@link ChronoField#YEAR}
- * @see {@link ChronoField#YEAR_OF_ERA}
+ * @see java.time.temporal.ChronoField#YEAR
+ * @see java.time.temporal.ChronoField#YEAR_OF_ERA
  */
 public class Date implements Comparable<Date> {
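
The corrected @see references above point at the two year fields behind the
"uuuu-MM-dd" pattern in this class comment. A small standalone sketch of the
difference, using plain java.time (not part of this commit):

    import java.time.LocalDate;
    import java.time.temporal.ChronoField;

    public class ProlepticYearDemo {
      public static void main(String[] args) {
        // ISO dates carry the proleptic year, which may be zero or negative.
        LocalDate d = LocalDate.parse("-0001-02-11");
        System.out.println(d.get(ChronoField.YEAR));        // -1 (proleptic year)
        System.out.println(d.get(ChronoField.YEAR_OF_ERA)); // 2 (2 BCE; always positive)
      }
    }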
 
diff --git a/common/src/java/org/apache/hadoop/hive/common/type/Timestamp.java b/common/src/java/org/apache/hadoop/hive/common/type/Timestamp.java
index 3efdc1cdef..30b1074c93 100644
--- a/common/src/java/org/apache/hadoop/hive/common/type/Timestamp.java
+++ b/common/src/java/org/apache/hadoop/hive/common/type/Timestamp.java
@@ -44,7 +44,7 @@ import static java.time.temporal.ChronoField.YEAR;
 * Timestamp is "uuuu-MM-dd HH:mm:ss[.SSS...]", where the time part is optional.
  * If time part is absent, a default '00:00:00.0' will be used.
  *
- * <table border="2">
+ * <table border="2" summary="">
  * <tr>
  * <th>Field</th>
  * <th>Format</th>
@@ -78,8 +78,8 @@ import static java.time.temporal.ChronoField.YEAR;
  * positive.
  * </p>
  *
- * @see {@link ChronoField#YEAR}
- * @see {@link ChronoField#YEAR_OF_ERA}
+ * @see java.time.temporal.ChronoField#YEAR
+ * @see java.time.temporal.ChronoField#YEAR_OF_ERA
  */
 public class Timestamp implements Comparable<Timestamp> {
   
diff --git a/common/src/java/org/apache/hive/common/util/TimestampParser.java b/common/src/java/org/apache/hive/common/util/TimestampParser.java
index bb591fb63f..c8ffc6930c 100644
--- a/common/src/java/org/apache/hive/common/util/TimestampParser.java
+++ b/common/src/java/org/apache/hive/common/util/TimestampParser.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
  * In addition to accepting format patterns, this parser provides support for
  * three pre-defined formats:
  *
- * <table border="1">
+ * <table border="1" summary="">
  * <thead>
  * <tr>
  * <th>Formatter</th>
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/calcite/FieldTrimmerBench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/calcite/FieldTrimmerBench.java
index d98e2511a1..435732c1ab 100644
--- a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/calcite/FieldTrimmerBench.java
+++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/calcite/FieldTrimmerBench.java
@@ -55,13 +55,13 @@ import org.openjdk.jmh.runner.options.OptionsBuilder;
 
 /**
  * This test measures the performance for field trimmer.
- * <p/>
+ * <p>
  * This test uses JMH framework for benchmarking.
 * You may execute this benchmark tool using JMH command line in different ways:
- * <p/>
+ * <p>
  * To use the settings shown in the main() function, use:
 * $ java -cp target/benchmarks.jar org.apache.hive.benchmark.calcite.FieldTrimmerBench
- * <p/>
+ * <p>
  * To use the default settings used by JMH, use:
 * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.calcite.FieldTrimmerBench
  */
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/hash/Murmur3Bench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/hash/Murmur3Bench.java
index cd85148ebb..f07420ae95 100644
--- a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/hash/Murmur3Bench.java
+++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/hash/Murmur3Bench.java
@@ -41,23 +41,23 @@ import org.openjdk.jmh.runner.options.OptionsBuilder;
 
 /**
  * This test measures the performance for vectorization.
- * <p/>
+ * <p>
  * This test uses JMH framework for benchmarking.
 * You may execute this benchmark tool using JMH command line in different ways:
- * <p/>
+ * <p>
  * To use the settings shown in the main() function, use:
  * $ java -cp target/benchmarks.jar org.apache.hive.benchmark.hash.Murmur3Bench
- * <p/>
+ * <p>
  * To use the default settings used by JMH, use:
 * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.hash.Murmur3Bench
- * <p/>
+ * <p>
  * To specify different parameters, use:
 * - This command will use 10 warm-up iterations, 5 test iterations, and 2 forks. And it will
  * display the Average Time (avgt) in Microseconds (us)
  * - Benchmark mode. Available modes are:
 * [Throughput/thrpt, AverageTime/avgt, SampleTime/sample, SingleShotTime/ss, All/all]
  * - Output time unit. Available time units are: [m, s, ms, us, ns].
- * <p/>
+ * <p>
 * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.hash.Murmur3Bench
  * -wi 10 -i 5 -f 2 -bm avgt -tu us
  */
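
The command-line flags documented above map directly onto JMH's programmatic
options. A self-contained sketch encoding -wi 10 -i 5 -f 2 -bm avgt -tu us in
code (hypothetical benchmark body, not part of this commit):

    import java.util.concurrent.TimeUnit;
    import org.openjdk.jmh.annotations.Benchmark;
    import org.openjdk.jmh.annotations.BenchmarkMode;
    import org.openjdk.jmh.annotations.Mode;
    import org.openjdk.jmh.annotations.OutputTimeUnit;
    import org.openjdk.jmh.runner.Runner;
    import org.openjdk.jmh.runner.RunnerException;
    import org.openjdk.jmh.runner.options.Options;
    import org.openjdk.jmh.runner.options.OptionsBuilder;

    public class AvgTimeSketch {
      @Benchmark
      @BenchmarkMode(Mode.AverageTime)          // -bm avgt
      @OutputTimeUnit(TimeUnit.MICROSECONDS)    // -tu us
      public long hashNanoTime() {
        return Long.hashCode(System.nanoTime());
      }

      public static void main(String[] args) throws RunnerException {
        Options opt = new OptionsBuilder()
            .include(AvgTimeSketch.class.getSimpleName())
            .warmupIterations(10)               // -wi 10
            .measurementIterations(5)           // -i 5
            .forks(2)                           // -f 2
            .build();
        new Runner(opt).run();
      }
    }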
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/serde/LazySimpleSerDeBench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/serde/LazySimpleSerDeBench.java
index 5dbc571df5..43c148fe0d 100644
--- a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/serde/LazySimpleSerDeBench.java
+++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/serde/LazySimpleSerDeBench.java
@@ -50,13 +50,13 @@ import org.openjdk.jmh.runner.options.OptionsBuilder;
 public class LazySimpleSerDeBench {
   /**
    * This test measures the performance for LazySimpleSerDe.
-   * <p/>
+   * <p>
    * This test uses JMH framework for benchmarking. You may execute this
    * benchmark tool using JMH command line in different ways:
-   * <p/>
+   * <p>
    * To run using default settings, use: 
   * $ java -cp target/benchmarks.jar org.apache.hive.benchmark.serde.LazySimpleSerDeBench
-   * <p/>
+   * <p>
    */
   public static final int DEFAULT_ITER_TIME = 1000000;
   public static final int DEFAULT_DATA_SIZE = 4096;
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedArithmeticBench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedArithmeticBench.java
index 70ee9b7ddb..6e07d0f360 100644
--- a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedArithmeticBench.java
+++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedArithmeticBench.java
@@ -30,23 +30,23 @@ import org.openjdk.jmh.runner.options.OptionsBuilder;
 
 /**
  * This test measures the performance for vectorization.
- * <p/>
+ * <p>
  * This test uses JMH framework for benchmarking.
 * You may execute this benchmark tool using JMH command line in different ways:
- * <p/>
+ * <p>
  * To use the settings shown in the main() function, use:
 * $ java -cp target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedArithmeticBench
- * <p/>
+ * <p>
  * To use the default settings used by JMH, use:
 * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedArithmeticBench
- * <p/>
+ * <p>
  * To specify different parameters, use:
 * - This command will use 10 warm-up iterations, 5 test iterations, and 2 forks. And it will
  * display the Average Time (avgt) in Microseconds (us)
  * - Benchmark mode. Available modes are:
 * [Throughput/thrpt, AverageTime/avgt, SampleTime/sample, SingleShotTime/ss, All/all]
  * - Output time unit. Available time units are: [m, s, ms, us, ns].
- * <p/>
+ * <p>
 * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedArithmeticBench
  * -wi 10 -i 5 -f 2 -bm avgt -tu us
  */
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedComparisonBench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedComparisonBench.java
index d54d1fa115..cfec3c8d4f 100644
--- a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedComparisonBench.java
+++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedComparisonBench.java
@@ -24,23 +24,23 @@ import org.openjdk.jmh.runner.options.OptionsBuilder;
 
 /**
  * This test measures the performance for vectorization.
- * <p/>
+ * <p>
  * This test uses JMH framework for benchmarking.
 * You may execute this benchmark tool using JMH command line in different ways:
- * <p/>
+ * <p>
  * To use the settings shown in the main() function, use:
 * $ java -cp target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedComparisonBench
- * <p/>
+ * <p>
  * To use the default settings used by JMH, use:
 * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedComparisonBench
- * <p/>
+ * <p>
  * To specify different parameters, use:
 * - This command will use 10 warm-up iterations, 5 test iterations, and 2 forks. And it will
  * display the Average Time (avgt) in Microseconds (us)
  * - Benchmark mode. Available modes are:
 * [Throughput/thrpt, AverageTime/avgt, SampleTime/sample, SingleShotTime/ss, All/all]
  * - Output time unit. Available time units are: [m, s, ms, us, ns].
- * <p/>
+ * <p>
 * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedComparisonBench
  * -wi 10 -i 5 -f 2 -bm avgt -tu us
  */
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLikeBench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLikeBench.java
index 8b1b045917..30f3992165 100644
--- a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLikeBench.java
+++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLikeBench.java
@@ -29,23 +29,23 @@ import org.openjdk.jmh.runner.options.OptionsBuilder;
 
 /**
  * This test measures the performance for vectorization.
- * <p/>
+ * <p>
  * This test uses JMH framework for benchmarking.
 * You may execute this benchmark tool using JMH command line in different ways:
- * <p/>
+ * <p>
  * To use the settings shown in the main() function, use:
 * $ java -cp target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedLikeBench
- * <p/>
+ * <p>
  * To use the default settings used by JMH, use:
 * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedLikeBench
- * <p/>
+ * <p>
  * To specify different parameters, use:
 * - This command will use 10 warm-up iterations, 5 test iterations, and 2 forks. And it will
  * display the Average Time (avgt) in Microseconds (us)
  * - Benchmark mode. Available modes are:
 * [Throughput/thrpt, AverageTime/avgt, SampleTime/sample, SingleShotTime/ss, All/all]
  * - Output time unit. Available time units are: [m, s, ms, us, ns].
- * <p/>
+ * <p>
 * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedLikeBench
  * -wi 10 -i 5 -f 2 -bm avgt -tu us
  */
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
index bf2f4b4e53..0dede7e19a 100644
--- a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
+++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
@@ -27,23 +27,23 @@ import org.openjdk.jmh.runner.options.OptionsBuilder;
 
 /**
  * This test measures the performance for vectorization.
- * <p/>
+ * <p>
  * This test uses JMH framework for benchmarking.
 * You may execute this benchmark tool using JMH command line in different ways:
- * <p/>
+ * <p>
  * To use the settings shown in the main() function, use:
 * $ java -cp target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedLogicBench
- * <p/>
+ * <p>
  * To use the default settings used by JMH, use:
 * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedLogicBench
- * <p/>
+ * <p>
  * To specify different parameters, use:
 * - This command will use 10 warm-up iterations, 5 test iterations, and 2 forks. And it will
  * display the Average Time (avgt) in Microseconds (us)
  * - Benchmark mode. Available modes are:
 * [Throughput/thrpt, AverageTime/avgt, SampleTime/sample, SingleShotTime/ss, All/all]
  * - Output time unit. Available time units are: [m, s, ms, us, ns].
- * <p/>
+ * <p>
 * $ java -jar target/benchmarks.jar org.apache.hive.benchmark.vectorization.VectorizedLogicBench
  * -wi 10 -i 5 -f 2 -bm avgt -tu us
  */
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/qoption/QTestOptionDispatcher.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/qoption/QTestOptionDispatcher.java
index 64e451690e..75939a4638 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/qoption/QTestOptionDispatcher.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/qoption/QTestOptionDispatcher.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.ql.QTestUtil;
  * Provides facilities to invoke {@link QTestOptionHandler}-s.
  *
  * Enables to dispatch option arguments to a specific option handler.
- * The option invocation format is '--! qt:<optionName>:<optionArgs>
+ * The option invocation format is '--! qt:&lt;optionName&gt;:&lt;optionArgs&gt;
  *
 * Please refer to specific implementations of {@link QTestOptionHandler} for more detailed information about them.
  */
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java b/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
index 45de932740..0ee0027d8c 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
@@ -222,7 +222,7 @@ public abstract class HiveBaseResultSet implements ResultSet {
    * @param columnIndex the first column is 1, the second is 2, ...
    * @return the column value; if the value is SQL NULL, the value returned is
    *         false
-   * @throws if the columnIndex is not valid; if a database access error occurs
+   * @throws SQLException if the columnIndex is not valid; if a database access error occurs
    *           or this method is called on a closed result set
    * @see ResultSet#getBoolean(int)
    */
@@ -261,7 +261,7 @@ public abstract class HiveBaseResultSet implements ResultSet {
    *          the name of the column
    * @return the column value; if the value is SQL NULL, the value returned is
    *         false
-   * @throws if the columnIndex is not valid; if a database access error occurs
+   * @throws SQLException if the columnIndex is not valid; if a database access error occurs
    *           or this method is called on a closed result set
    * @see ResultSet#getBoolean(String)
    */
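
The two @throws fixes above only name the missing exception type; the contract
itself is standard JDBC. A minimal sketch of what it means for callers
(hypothetical query and column, not part of this commit):

    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    class GetBooleanContract {
      static void printFlags(Statement stmt) throws SQLException {
        try (ResultSet rs = stmt.executeQuery("SELECT active FROM users")) {
          while (rs.next()) {
            boolean active = rs.getBoolean(1); // false when the value is SQL NULL
            boolean wasNull = rs.wasNull();    // distinguishes NULL from a real false
            System.out.println(active + " (null? " + wasNull + ")");
          }
        }
        // Once try-with-resources closes rs, any further rs.getBoolean(...)
        // call throws SQLException, exactly as the corrected javadoc states.
      }
    }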
diff --git a/jdbc/src/java/org/apache/hive/jdbc/saml/IJdbcBrowserClient.java b/jdbc/src/java/org/apache/hive/jdbc/saml/IJdbcBrowserClient.java
index 6bed4204da..a612583830 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/saml/IJdbcBrowserClient.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/saml/IJdbcBrowserClient.java
@@ -53,7 +53,7 @@ public interface IJdbcBrowserClient extends Closeable {
   /**
   * Initializes the browser client context. The client context contains a client
    * identifier which must be used to set the http header with key
-   * {@link HiveSamlUtils.SSO_CLIENT_IDENTIFIER}.
+   * {@link HiveSamlUtils#SSO_CLIENT_IDENTIFIER}.
    */
   void init(JdbcBrowserClientContext context);
 
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java b/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
index 3d6258f29e..a4fc13a0ee 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/io/api/LlapIo.java
@@ -56,7 +56,7 @@ public interface LlapIo<T> {
   * @param tag a CacheTag instance must be provided as that's needed for cache insertion
   * @param fileKey fileId of the ORC file (either the Long fileId of HDFS or the SyntheticFileId).
    *                Optional, if it is not provided, it will be generated, see:
-   *                {@link org.apache.hadoop.hive.ql.io.HdfsUtils.getFileId()}
+   *                org.apache.hadoop.hive.ql.io.HdfsUtils#getFileId()
    * @return The tail of the ORC file
    * @throws IOException ex
    */
diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/security/DefaultJwtSharedSecretProvider.java b/llap-common/src/java/org/apache/hadoop/hive/llap/security/DefaultJwtSharedSecretProvider.java
index dca8afc2c6..68f936ff5b 100644
--- a/llap-common/src/java/org/apache/hadoop/hive/llap/security/DefaultJwtSharedSecretProvider.java
+++ b/llap-common/src/java/org/apache/hadoop/hive/llap/security/DefaultJwtSharedSecretProvider.java
@@ -43,7 +43,7 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.LLAP_EXTERNAL_CLIENT
  *
 * If secret is not found even after 1) and 2), {@link #init(Configuration)} methods throws {@link IllegalStateException}.
  *
- * Length of shared secret provided in 1) or 2) should be > 32 bytes.
+ * Length of shared secret provided in 1) or 2) should be &gt; 32 bytes.
  *
 * It uses the same encryption and decryption secret which can be used to sign and verify JWT.
  */
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapMetricsListener.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapMetricsListener.java
index 446100b415..b42434eb52 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapMetricsListener.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapMetricsListener.java
@@ -41,7 +41,7 @@ public interface LlapMetricsListener {
 
   /**
   * Handler will be called when new data is arrived for every active Llap Daemon in the cluster.
-   * @param newMetrics The map of the worker indentity -> metrics
+   * @param newMetrics The map of the worker indentity -&gt; metrics
    */
   void newClusterMetrics(Map<String, LlapMetrics> newMetrics);
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LlapHiveUtils.java b/ql/src/java/org/apache/hadoop/hive/llap/LlapHiveUtils.java
index c988fd81a0..52126d971c 100644
--- a/ql/src/java/org/apache/hadoop/hive/llap/LlapHiveUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/llap/LlapHiveUtils.java
@@ -52,7 +52,7 @@ public final class LlapHiveUtils {
   }
 
   /**
-   * Takes a Path and looks up the PartitionDesc instance associated with it in a map of Path->PartitionDesc entries.
+   * Takes a Path and looks up the PartitionDesc instance associated with it in a map of Path-&gt;PartitionDesc entries.
    * If it is not found (e.g. Path denotes a partition path, but map contains table level instances only) we will try
    * to do the same with the parent of this path, traversing up until there's a match, if any.
    * @param path the absolute path used for the look up
@@ -104,7 +104,6 @@ public final class LlapHiveUtils {
    * Returns MapWork based what is serialized in the JobConf instance provided.
    * @param job
    * @return the MapWork instance. Might be null if missing.
-   * @throws HiveException
    */
   public static MapWork findMapWork(JobConf job) {
     String inputName = job.get(Utilities.INPUT_NAME, null);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableAnalyzer.java
index 0a1a99ab16..fb2e80b863 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/DescTableAnalyzer.java
@@ -46,14 +46,14 @@ import org.apache.hadoop.hive.ql.parse.SemanticException;
  * A query like this will generate a tree as follows
  *   "describe formatted default.maptable partition (b=100) id;"
  * TOK_TABTYPE
- *   TOK_TABNAME --> root for tablename, 2 child nodes mean DB specified
+ *   TOK_TABNAME --&gt; root for tablename, 2 child nodes mean DB specified
  *     default
  *     maptable
- *   TOK_PARTSPEC  --> root node for partition spec. else columnName
+ *   TOK_PARTSPEC  --&gt; root node for partition spec. else columnName
  *     TOK_PARTVAL
  *       b
  *       100
- *   id           --> root node for columnName
+ *   id           --&gt; root node for columnName
  * formatted
  */
 @DDLType(types = HiveParser.TOK_DESCTABLE)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AddToClassPathAction.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AddToClassPathAction.java
index a2ffbb1f24..b5bcbe90cc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AddToClassPathAction.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AddToClassPathAction.java
@@ -26,8 +26,8 @@ import java.util.Objects;
 
 /**
 * Helper class to create UDFClassLoader when running under a security manager. To create a class loader:
- * > AddToClassPathAction addAction = new AddToClassPathAction(parentLoader, newPaths, true);
- * > UDFClassLoader childClassLoader = AccessController.doPrivileged(addAction);
+ * &gt; AddToClassPathAction addAction = new AddToClassPathAction(parentLoader, newPaths, true);
+ * &gt; UDFClassLoader childClassLoader = AccessController.doPrivileged(addAction);
 * To try to add to the class path of the existing class loader; call the above without forceNewClassLoader=true.
  * Note that a class loader might be still created as fallback method.
  * <p>
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
index ba1decdc61..19bb543c21 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
@@ -142,7 +142,7 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
   protected List<Task<?>> parentTasks;
   /**
   * this can be set by the Task, to provide more info about the failure in TaskResult
-   * where the Driver can find it.  This is checked if {@link Task#execute(org.apache.hadoop.hive.ql.TaskQueue)}
+   * where the Driver can find it.  This is checked if {@link Task#execute()}
    * returns non-0 code.
    */
   private Throwable exception;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 1f12d0cb76..09ff212923 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -1127,7 +1127,8 @@ public final class Utilities {
    * @param destFileName
    *          the target filename
    * @return The final path the file was moved to.
-   * @throws IOException, HiveException
+   * @throws IOException
+   * @throws HiveException
    */
  public static Path moveFile(FileSystem fs, Path srcFile, Path destDir, String destFileName)
       throws IOException, HiveException {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/WindowFunctionDescription.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/WindowFunctionDescription.java
index 60932be297..bef82f25fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/WindowFunctionDescription.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/WindowFunctionDescription.java
@@ -63,23 +63,23 @@ public @interface WindowFunctionDescription {
 
   /**
   * This property specifies whether the UDAF is an Ordered-set aggregate function.
-   * <ordered-set aggregate functions> ::=
-   *   <hypothetical set function> |
-   *   <inverse distribution function>
+   * &lt;ordered-set aggregate functions&gt; ::=
+   *   &lt;hypothetical set function&gt; |
+   *   &lt;inverse distribution function&gt;
    *
-   * <hypothetical set function> ::=
-   *   <rank function type> <left paren>
-   *   <hypothetical set function value expression list> <right paren>
-   *   <within group specification>
+   * &lt;hypothetical set function&gt; ::=
+   *   &lt;rank function type&gt; &lt;left paren&gt;
+   *   &lt;hypothetical set function value expression list&gt; &lt;right paren&gt;
+   *   &lt;within group specification&gt;
    *
-   * <rank function type> ::= RANK | DENSE_RANK | PERCENT_RANK | CUME_DIST
+   * &lt;rank function type&gt; ::= RANK | DENSE_RANK | PERCENT_RANK | CUME_DIST
    *
-   * <inverse distribution function> ::=
-   *   <inverse distribution function type> <left paren>
-   *   <inverse distribution function argument> <right paren>
-   *   <within group specification>
+   * &lt;inverse distribution function&gt; ::=
+   *   &lt;inverse distribution function type&gt; &lt;left paren&gt;
+   *   &lt;inverse distribution function argument&gt; &lt;right paren&gt;
+   *   &lt;within group specification&gt;
    *
-   * <inverse distribution function type> ::= PERCENTILE_CONT | PERCENTILE_DISC
+   * &lt;inverse distribution function type&gt; ::= PERCENTILE_CONT | PERCENTILE_DISC
    *
    * @return true if the function can be used as an ordered-set aggregate
    */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/OptimisedBootstrapUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/OptimisedBootstrapUtils.java
index 002f635cc0..f3aa530283 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/OptimisedBootstrapUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/OptimisedBootstrapUtils.java
@@ -74,7 +74,7 @@ public class OptimisedBootstrapUtils {
  public static final String BOOTSTRAP_TABLES_LIST = "_failover_bootstrap_table_list";
 
   /**
-   * Gets & checks whether the database is target of replication.
+   * Gets &amp; checks whether the database is target of replication.
    * @param dbName name of database
    * @param hive hive object
    * @return true, if the database has repl.target.for property set.
@@ -91,7 +91,7 @@ public class OptimisedBootstrapUtils {
   }
 
   /**
-   * Gets the source & target event id  from the event ack file
+   * Gets the source &amp; target event id  from the event ack file
    * @param dumpPath the dump path
    * @param conf the hive configuration
    * @return the event id from file.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStatsTracker.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStatsTracker.java
index 06c147ce71..023c788aff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStatsTracker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStatsTracker.java
@@ -107,7 +107,7 @@ public class ReplStatsTracker {
 
   /**
    * Get the DescriptiveStatistics for each event type.
-   * @return A HashMap, with key as event type & value as the DescriptiveAnalytics of the entire run.
+   * @return A HashMap, with key as event type &amp; value as the DescriptiveAnalytics of the entire run.
    */
   public ConcurrentHashMap<String, DescriptiveStatistics> getDescMap() {
     return descMap;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 7622bccae5..d8ac63acea 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -1260,7 +1260,6 @@ public class DagUtils {
    * @param inputOutputJars The file names to localize.
   * @return Map&lt;String, LocalResource&gt; (srcPath, local resources) to add to execution
   * @throws IOException when hdfs operation fails.
-   * @throws LoginException when getDefaultDestDir fails with the same exception
    */
  public Map<String, LocalResource> localizeTempFiles(String hdfsDirPathStr, Configuration conf,
       String[] inputOutputJars, String[] skipJars) throws IOException {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToCharWithFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToCharWithFormat.java
index 17afbc3b2b..7ed9902a98 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToCharWithFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToCharWithFormat.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import java.nio.charset.StandardCharsets;
 
 /**
- * Vectorized UDF for CAST (<DATE> TO CHAR(<LENGTH>) WITH FORMAT <STRING>).
+ * Vectorized UDF for CAST (&lt;DATE&gt; TO CHAR(&lt;LENGTH&gt;) WITH FORMAT &lt;STRING&gt;).
  */
 public class CastDateToCharWithFormat extends CastDateToChar {
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToStringWithFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToStringWithFormat.java
index acfa2ba27c..aec5c12721 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToStringWithFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToStringWithFormat.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import java.nio.charset.StandardCharsets;
 
 /**
- * Vectorized UDF for CAST (<DATE> TO STRING WITH FORMAT <STRING>).
+ * Vectorized UDF for CAST (&lt;DATE&gt; TO STRING WITH FORMAT &lt;STRING&gt;).
  */
 public class CastDateToStringWithFormat extends CastDateToString {
   private static final long serialVersionUID = 1L;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToVarCharWithFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToVarCharWithFormat.java
index 6ed44a6d57..9cf28fd67d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToVarCharWithFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDateToVarCharWithFormat.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import java.nio.charset.StandardCharsets;
 
 /**
- * Vectorized UDF for CAST (<DATE> TO VARCHAR(<LENGTH>) WITH FORMAT <STRING>).
+ * Vectorized UDF for CAST (&lt;DATE&gt; TO VARCHAR(&lt;LENGTH&gt;) WITH FORMAT &lt;STRING&gt;).
  */
 public class CastDateToVarCharWithFormat extends CastDateToVarChar {
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDateWithFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDateWithFormat.java
index b50ed0e70c..a2bcd56c6f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDateWithFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToDateWithFormat.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.serde2.io.DateWritableV2;
 import java.nio.charset.StandardCharsets;
 
 /**
- * Vectorized UDF for CAST (<STRING> TO DATE WITH FORMAT <STRING>).
+ * Vectorized UDF for CAST (&lt;STRING&gt; TO DATE WITH FORMAT &lt;STRING&gt;).
  */
 public class CastStringToDateWithFormat extends CastStringToDate {
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToTimestampWithFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToTimestampWithFormat.java
index 9361e77de8..407249570d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToTimestampWithFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastStringToTimestampWithFormat.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
 import java.nio.charset.StandardCharsets;
 
 /**
- * Vectorized UDF for CAST (<STRING> TO TIMESTAMP WITH FORMAT <STRING>).
+ * Vectorized UDF for CAST (&lt;STRING&gt; TO TIMESTAMP WITH FORMAT &lt;STRING&gt;).
  */
 public class CastStringToTimestampWithFormat extends CastStringToTimestamp {
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToCharWithFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToCharWithFormat.java
index 5472a7e9de..bcb2182b1f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToCharWithFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToCharWithFormat.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
 import java.nio.charset.StandardCharsets;
 
 /**
- * Vectorized UDF for CAST (<TIMESTAMP> TO CHAR(<LENGTH>) WITH FORMAT <STRING>).
+ * Vectorized UDF for CAST (&lt;TIMESTAMP&gt; TO CHAR(&lt;LENGTH&gt;) WITH FORMAT &lt;STRING&gt;).
  */
 public class CastTimestampToCharWithFormat extends CastTimestampToChar {
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToStringWithFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToStringWithFormat.java
index 148995d0cf..bc748d7fe9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToStringWithFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToStringWithFormat.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
 import java.nio.charset.StandardCharsets;
 
 /**
- * Vectorized UDF for CAST (<TIMESTAMP> TO STRING WITH FORMAT <STRING>).
+ * Vectorized UDF for CAST (&lt;TIMESTAMP&gt; TO STRING WITH FORMAT &lt;STRING&gt;).
  */
 public class CastTimestampToStringWithFormat extends CastTimestampToString {
   private static final long serialVersionUID = 1L;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToVarCharWithFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToVarCharWithFormat.java
index a0569e1023..7062be7bfd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToVarCharWithFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastTimestampToVarCharWithFormat.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
 import java.nio.charset.StandardCharsets;
 
 /**
- * Vectorized UDF for CAST (<TIMESTAMP> TO VARCHAR(<LENGTH>) WITH FORMAT <STRING>).
+ * Vectorized UDF for CAST (&lt;TIMESTAMP&gt; TO VARCHAR(&lt;LENGTH&gt;) WITH FORMAT &lt;STRING&gt;).
  */
 public class CastTimestampToVarCharWithFormat extends CastTimestampToVarChar {
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
index ea1c6ce088..c23956c759 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
@@ -137,7 +137,7 @@ public interface AcidInputFormat<KEY extends WritableComparable, VALUE>
     /**
      * @param minWriteId min writeId of the delta directory
      * @param maxWriteId max writeId of the delta directory
-     * @param stmtIds delta dir suffixes when a single txn writes > 1 delta in the same partition
+     * @param stmtIds delta dir suffixes when a single txn writes &gt; 1 delta in the same partition
      * @param visibilityTxnId maybe 0, if the dir name didn't have it.  txnid:0 is always visible
      * @param deltaFiles bucketFiles in the directory
      */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java
index 00433c6004..b2323b8653 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java
@@ -37,7 +37,6 @@ public class StreamUtils {
    * @param streamName - stream name
    * @param streamBuffer - stream buffer
    * @return - SettableUncompressedStream
-   * @throws IOException
    */
  public static SettableUncompressedStream createSettableUncompressedStream(String streamName,
       ColumnStreamData streamBuffer) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogInputFormat.java
index 69b66dd25f..810788104a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogInputFormat.java
@@ -83,7 +83,7 @@ import com.google.common.annotations.VisibleForTesting;
 *   If a filename is 2019-04-02-21-00_0.log.gz and timeslice is 300s then the file 2019-04-02-21-00_0.log.gz is
 *   expected to have log lines from timestamp 2019:04:02 21:00:00 to 2019:04:02 21:05:00 timestamp.
  * - Logs table should have 'ts' as timestamp column.
- * - Only simple BETWEEN filter predicate is supported for 'ts' column. There cannot be >1 predicates on 'ts' column.
+ * - Only simple BETWEEN filter predicate is supported for 'ts' column. There cannot be &gt;1 predicates on 'ts' column.
  */
 public class SyslogInputFormat extends TextInputFormat {
  private static final Logger LOG = LoggerFactory.getLogger(SyslogInputFormat.class);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogParser.java b/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogParser.java
index 8b700dc5ef..66ed2e53f0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogParser.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/syslog/SyslogParser.java
@@ -44,18 +44,18 @@ import org.apache.hadoop.hive.common.type.Timestamp;
  * about the way hive logs using syslog format (specifically RFC5424).
  *
 * This implementation also parses structured data, returns all parsed fields as map and also un-escapes messages.
- * This parser also gracefully handles some corner cases where 'msg' can be empty or line can start with '<' but not
+ * This parser also gracefully handles some corner cases where 'msg' can be empty or line can start with '&lt;' but not
  * a valid RFC5424 format etc.
  *
  * Assumption:
 * 1) This parser assumes the linebreaks '\n' in stack traces for example are replaced by '\r' to make single
 * line message. The reader will do replacement of '\r' with '\n' at the time of read.
 * 2) This parser assumes structured data values are html escaped. So it will html unescape when parsing structured
- * data. (hive writes log lines directly to stderr that look like rfc5424 layout starting with '<' so the expectation
+ * data. (hive writes log lines directly to stderr that look like rfc5424 layout starting with '&lt;' so the expectation
  * from log4j2 is to escape those lines using html escaping).
- * 3) Read event returns List<Object> conforming to sys.logs table schema in hive. The schema for sys.logs table is
+ * 3) Read event returns List&lt;Object&gt; conforming to sys.logs table schema in hive. The schema for sys.logs table is
 * expected to be (facility STRING, severity STRING, version STRING, ts TIMESTAMP, hostname STRING, app_name STRING,
- * proc_id STRING, msg_id STRING, structured_data map<STRING,STRING>, msg BINARY, unmatched BINARY)
+ * proc_id STRING, msg_id STRING, structured_data map&lt;STRING,STRING&gt;, msg BINARY, unmatched BINARY)
  * 4) Timestamps are in UTC
  *
 * This parser is tested with Log4j2's RFC5424 layout generated using the following properties
@@ -161,8 +161,7 @@ public class SyslogParser implements Closeable {
    * Read the next Syslog message from the stream.
    *
    * @return a parsed map of object, or null on EOF.
-   * @throw EOFException if EOF is found in an inappropriate place.
-   * @throw IOException if the underlying stream fails, or unexpected
+   * @throws IOException if the underlying stream fails, or unexpected
    * bytes are seen.
    */
   public List<Object> readEvent() throws IOException {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 48e859b8c8..23c6d85d03 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -5038,7 +5038,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @param isSrcLocal true if source is on local file system
    * @param isAcidIUD true if this is an ACID based Insert/Update/Delete
   * @param isOverwrite if true, then overwrite if destination file exist, else add a duplicate copy
-   * @param newFiles if this is non-null, a list of files that were created as a result of this
+   * @param newFilesStatus if this is non-null, a list of files that were created as a result of this
    *                 move will be returned.
    * @param isManaged if table is managed.
    * @param isCompactionTable is table used in query-based compaction
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
index 7d7a01234f..4fcc288050 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
@@ -213,7 +213,7 @@ public interface HiveStorageHandler extends Configurable {
    *
    * @param operatorDesc operatorDesc
    * @param initialProps Map containing initial operator properties
-   * @return Map<String, String> containing additional operator specific information from storage handler
+   * @return Map&lt;String, String&gt; containing additional operator specific information from storage handler
    * OR `initialProps` if the storage handler choose to not provide any such information.
    */
  default Map<String, String> getOperatorDescProperties(OperatorDesc operatorDesc, Map<String, String> initialProps) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ParallelEdgeFixer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ParallelEdgeFixer.java
index 55d338f2b5..350c3f0cc4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ParallelEdgeFixer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ParallelEdgeFixer.java
@@ -75,7 +75,7 @@ import com.google.common.collect.Lists;
  *       |        |                       |        |
  *       |        |                       |   +--- | ---+
  *       |        |                       |   | +-----+ |
- *       |        |         >>>>          |   | |RS_T | |
+ *       |        |         &gt;          |   | |RS_T | |
  *       |        |                       |   | +-----+ |
  *       |        |                       |   +--- | ---+
  *       |        |                       |        |
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SemiJoinReductionMerge.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SemiJoinReductionMerge.java
index 34531591dc..f78e0c3dbc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SemiJoinReductionMerge.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SemiJoinReductionMerge.java
@@ -86,21 +86,19 @@ import static org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils.and;
 *   <li>Filter creation from the source relation and broadcast: SOURCE - SEL - GB1 - RS1 - GB2 - RS2</li>
 *   <li>Filter application on the target relation: TS - FIL[in_bloom(col)]</li>
  * </ul>
- * </p>
  * <p>
 * An example of the transformation on three single column semi join reducers is shown below. The plan is simplified for
  * presentation purposes.
  * <h3>BEFORE:</h3>
  * <pre>
  *        / SEL[fname] - GB1 - RS1 - GB2 - RS2  \
- * SOURCE - SEL[lname] - GB1 - RS1 - GB2 - RS2  -> TS[Author] - FIL[in_bloom(fname) ^ in_bloom(lname) ^ in_bloom(age)]
+ * SOURCE - SEL[lname] - GB1 - RS1 - GB2 - RS2  -&gt; TS[Author] - FIL[in_bloom(fname) ^ in_bloom(lname) ^ in_bloom(age)]
  *        \ SEL[age]   - GB1 - RS1 - GB2 - RS2  /
  * </pre>
  * <h3>AFTER:</h3>
  * <pre>
- * SOURCE - SEL[fname, lname, age] - GB1 - RS1 - GB2 - RS2 -> TS[Author] - FIL[in_bloom(hash(fname,lname,age)]
+ * SOURCE - SEL[fname, lname, age] - GB1 - RS1 - GB2 - RS2 -&gt; TS[Author] - FIL[in_bloom(hash(fname,lname,age)]
  * </pre>
- * </p>
  */
 public class SemiJoinReductionMerge extends Transform {
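
The merged plan above probes a single bloom filter keyed on the combined hash
of all three columns rather than three separate filters. A toy illustration of
that idea (a naive single-hash BitSet; Hive's real bloom filter implementation
differs):

    import java.util.BitSet;
    import java.util.Objects;

    class CombinedBloomSketch {
      private static final int SIZE = 1 << 16;
      private final BitSet bits = new BitSet(SIZE);

      void add(String fname, String lname, int age) {
        bits.set(index(fname, lname, age));        // one insertion per source row
      }

      boolean mightContain(String fname, String lname, int age) {
        // May return false positives, never false negatives.
        return bits.get(index(fname, lname, age));
      }

      private int index(String fname, String lname, int age) {
        return Math.floorMod(Objects.hash(fname, lname, age), SIZE);
      }
    }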
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/functions/HiveMergeableAggregate.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/functions/HiveMergeableAggregate.java
index 041345abda..9abd7f73cd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/functions/HiveMergeableAggregate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/functions/HiveMergeableAggregate.java
@@ -28,7 +28,7 @@ import org.apache.calcite.sql.type.SqlReturnTypeInference;
  * Mergeable aggregate.
  *
  * A mergeable aggregate is:
- * - accepts the same kind as inputs as the output (an X^n -> X function)
+ * - accepts the same kind as inputs as the output (an X^n -&gt; X function)
  *
 * Example: the SUM function is a great example; since SUM of SUM -s is the overall sum.
  */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateSortLimitRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateSortLimitRule.java
index 4e84d37a4e..583a9ee456 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateSortLimitRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateSortLimitRule.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 * Rule that adds sorting to GROUP BY col0 LIMIT n in presence of aggregate functions.
  * Ex.: SELECT id, count(1) FROM t_table GROUP BY id LIMIT 2
  *
- * Above query has a physical plan like Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ * Above query has a physical plan like Reducer 2 &lt;- Map 1 (SIMPLE_EDGE)
  * Both mapper and reducer edges may have multiple Mapper and Reducer 
instances to enable parallel process of data.
  * Aggregate function results are calculated in two steps:
  * 1) first mappers calculate a partial result from the rows processed by each 
instance.
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidRules.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidRules.java
index d7e2508c89..700f604b67 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidRules.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidRules.java
@@ -62,7 +62,7 @@ import java.util.TreeSet;
  * Druid rules with Hive builder factory.
  *
  * Simplify this class when upgrading to Calcite 1.26 using
- * <a href="https://issues.apache.org/jira/browse/CALCITE-4200">
+ * <a 
href="https://issues.apache.org/jira/browse/CALCITE-4200">CALCITE-4200</a>
  */
 public class HiveDruidRules {
 
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveHepExtractRelNodeRule.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveHepExtractRelNodeRule.java
index a645b896b4..12e47843d1 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveHepExtractRelNodeRule.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveHepExtractRelNodeRule.java
@@ -29,7 +29,8 @@ import org.apache.calcite.rel.RelNode;
 /**
  * The goal of this rule is to extract the RelNode from the
  * HepRelVertex node so rules do tree traversal can be applied correctly.
- * {@see HiveFieldTrimmerRule, 
HiveAggregateInsertDeleteIncrementalRewritingRule}
+ * @see HiveFieldTrimmerRule
+ * @see 
org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAggregateInsertDeleteIncrementalRewritingRule
  */
 public class HiveHepExtractRelNodeRule extends RelOptRule {
 
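
{@see ...} is not part of the javadoc tag set, which is why doclint rejects
it; the valid forms are the @see block tag and the inline {@link} tag. A
sketch using JDK types so both references are guaranteed to resolve:

    /**
     * Unwraps the vertex, much as {@link java.util.Optional#get()} unwraps
     * a contained value.
     *
     * @see java.util.Optional
     */
    public class UnwrapExample {
    }
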
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectSortExchangeTransposeRule.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectSortExchangeTransposeRule.java
index d40d5c447c..cd92b86a4c 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectSortExchangeTransposeRule.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectSortExchangeTransposeRule.java
@@ -48,7 +48,7 @@ import com.google.common.collect.ImmutableList;
  *   HiveSortExchange
  *     ...
  *
- * =>
+ * =&gt;
  *
  * HiveSortExchange
  *   HiveProject
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRewriteToDataSketchesRules.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRewriteToDataSketchesRules.java
index b86423c36d..d97669936e 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRewriteToDataSketchesRules.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRewriteToDataSketchesRules.java
@@ -67,7 +67,7 @@ import com.google.common.collect.Lists;
 /**
  * This rule could rewrite aggregate calls to be calculated using sketch based 
functions.
  *
- * <br/>
+ * <br>
  * Currently it can rewrite:
  * <ul>
  *  <li>{@code count(distinct(x))} using {@code CountDistinctRewrite}
@@ -652,7 +652,7 @@ public final class HiveRewriteToDataSketchesRules {
    *  <pre>
    *   SELECT id, NTILE(4) OVER (ORDER BY id) FROM sketch_input;
    *     ⇒ SELECT id, CASE
-   *                    WHEN CEIL(ds_kll_cdf(ds, CAST(id AS FLOAT) )[0]) < 1
+   *                    WHEN CEIL(ds_kll_cdf(ds, CAST(id AS FLOAT) )[0]) &lt; 1
    *                      THEN 1
    *                    ELSE CEIL(ds_kll_cdf(ds, CAST(id AS FLOAT) )[0])
    *                  END
@@ -708,7 +708,7 @@ public final class HiveRewriteToDataSketchesRules {
    *  <pre>
    *   SELECT id, RANK() OVER (ORDER BY id) FROM sketch_input;
    *     ⇒ SELECT id, CASE
-   *                    WHEN ds_kll_n(ds) < (ceil(ds_kll_rank(ds, CAST(id AS 
FLOAT) )*ds_kll_n(ds))+1)
+   *                    WHEN ds_kll_n(ds) &lt; (ceil(ds_kll_rank(ds, CAST(id 
AS FLOAT) )*ds_kll_n(ds))+1)
    *                    THEN ds_kll_n(ds)
    *                    ELSE (ceil(ds_kll_rank(ds, CAST(id AS FLOAT) 
)*ds_kll_n(ds))+1)
    *                  END
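
Note that <pre> does not suspend HTML parsing in javadoc, so operators in
preformatted SQL still need entities; nesting {@code} inside the <pre> block
is the usual way to avoid that. An illustrative, hypothetical class:

    /**
     * <pre>
     * WHEN ds_kll_n(ds) &lt; rank THEN ...          (entities required)
     * {@code WHEN ds_kll_n(ds) < rank THEN ...}     (no escaping in inline code)
     * </pre>
     */
    public class SketchDoc {
    }
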
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateInsertDeleteIncrementalRewritingRule.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateInsertDeleteIncrementalRewritingRule.java
index 601cbfc9e9..9005f643e2 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateInsertDeleteIncrementalRewritingRule.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateInsertDeleteIncrementalRewritingRule.java
@@ -61,7 +61,7 @@ import 
org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveHepExtractRelNodeRu
  *   JOIN TAB_B ON (TAB_A.a = TAB_B.z)
  *   WHERE TAB_A.ROW_ID &gt; 5
  *   GROUP BY a, b) source
- * ON (mv.a <=> source.a AND mv.b <=> source.b)
+ * ON (mv.a &lt;=&gt; source.a AND mv.b &lt;=&gt; source.b)
  * WHEN MATCHED AND mv.c + source.c &lt;&gt; 0
  *   THEN UPDATE SET mv.s = mv.s + source.s, mv.c = mv.c + source.c
  * WHEN MATCHED AND countStar = 0 THEN DELETE
@@ -70,13 +70,13 @@ import 
org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveHepExtractRelNodeRu
  *
  * To be precise, we need to convert it into a MERGE rewritten as:
  * FROM (select *, true flag from mv) mv right outer join _source_ source
- * ON (mv.a <=> source.a AND mv.b <=> source.b)
- * INSERT INTO TABLE mv                                       &lt- (insert new 
rows into the view)
+ * ON (mv.a &lt;=&gt; source.a AND mv.b &lt;=&gt; source.b)
+ * INSERT INTO TABLE mv                                       &lt;- (insert 
new rows into the view)
  *   SELECT source.a, source.b, s, c
  *   WHERE mv.flag IS NULL
- * INSERT INTO TABLE mv                                       &lt- (update 
existing rows in the view)
+ * INSERT INTO TABLE mv                                       &lt;- (update 
existing rows in the view)
  *   SELECT mv.ROW__ID, source.a, source.b,
- *     CASE WHEN mv.s IS NULL AND source.s IS NULL THEN NULL  &lt- (use case 
expression to handle nulls from both sides)
+ *     CASE WHEN mv.s IS NULL AND source.s IS NULL THEN NULL  &lt;- (use case 
expression to handle nulls from both sides)
  *          WHEN mv.s IS NULL THEN source.s
  *          WHEN source.s IS NULL THEN mv.s
  *          ELSE mv.s + source.s END,
@@ -86,12 +86,12 @@ import 
org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveHepExtractRelNodeRu
  *          ELSE mv.s + source.s END countStar,
  *   WHERE mv.flag AND countStar &lt;&gt; 0
  *   SORT BY mv.ROW__ID;
- * INSERT INTO TABLE mv                                       &lt- (delete 
from the views)
+ * INSERT INTO TABLE mv                                       &lt;- (delete 
from the views)
  *   SELECT mv.ROW__ID
  *   WHERE mv.flag AND countStar = 0
  *   SORT BY mv.ROW__ID;
  *
- * {@see CalcitePlanner#fixUpASTAggregateInsertDeleteIncrementalRebuild}
+ * @see org.apache.hadoop.hive.ql.parse.CalcitePlanner
  */
 public class HiveAggregateInsertDeleteIncrementalRewritingRule extends 
HiveAggregateIncrementalRewritingRuleBase<
         
HiveAggregateInsertDeleteIncrementalRewritingRule.IncrementalComputePlanWithDeletedRows>
 {
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateInsertIncrementalRewritingRule.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateInsertIncrementalRewritingRule.java
index 60d444588b..18d7abcafb 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateInsertIncrementalRewritingRule.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateInsertIncrementalRewritingRule.java
@@ -57,7 +57,7 @@ import java.util.List;
  *   JOIN TAB_B ON (TAB_A.a = TAB_B.z)
  *   WHERE TAB_A.ROW_ID &gt; 5
  *   GROUP BY a, b) source
- * ON (mv.a <=> source.a AND mv.b <=> source.b)
+ * ON (mv.a &lt;=&gt; source.a AND mv.b &lt;=&gt; source.b)
  * WHEN MATCHED AND mv.c + source.c &lt;&gt; 0
  *   THEN UPDATE SET mv.s = mv.s + source.s, mv.c = mv.c + source.c
  * WHEN NOT MATCHED
@@ -65,7 +65,7 @@ import java.util.List;
  *
  * To be precise, we need to convert it into a MERGE rewritten as:
  * FROM (select *, true flag from mv) mv right outer join _source_ source
- * ON (mv.a <=> source.a AND mv.b <=> source.b)
+ * ON (mv.a &lt;=&gt; source.a AND mv.b &lt;=&gt; source.b)
  * INSERT INTO TABLE mv
  *   SELECT source.a, source.b, s, c
  *   WHERE mv.flag IS NULL
@@ -79,7 +79,7 @@ import java.util.List;
  *   WHERE mv.flag
  *   SORT BY mv.ROW__ID;
  *
- * {@see CalcitePlanner#fixUpASTAggregateInsertIncrementalRebuild}
+ * @see org.apache.hadoop.hive.ql.parse.CalcitePlanner
  */
 public class HiveAggregateInsertIncrementalRewritingRule extends 
HiveAggregateIncrementalRewritingRuleBase<
         HiveAggregateIncrementalRewritingRuleBase.IncrementalComputePlan> {
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregatePartitionIncrementalRewritingRule.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregatePartitionIncrementalRewritingRule.java
index 0d24cde8e6..7e582ed29b 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregatePartitionIncrementalRewritingRule.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregatePartitionIncrementalRewritingRule.java
@@ -59,9 +59,9 @@ import static 
org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.findRe
  *
  * SELECT a, b, sum(sumc) FROM (
  *     SELECT a, b, sumc FROM mat1
- *     LEFT SEMI JOIN (SELECT a, b, sum(c) FROM t1 WHERE ROW__ID.writeId > 1 
GROUP BY b, a) q ON (mat1.a <=> q.a)
+ *     LEFT SEMI JOIN (SELECT a, b, sum(c) FROM t1 WHERE ROW__ID.writeId &gt; 
1 GROUP BY b, a) q ON (mat1.a &lt;=&gt; q.a)
  *     UNION ALL
- *     SELECT a, b, sum(c) sumc FROM t1 WHERE ROW__ID.writeId > 1 GROUP BY b, a
+ *     SELECT a, b, sum(c) sumc FROM t1 WHERE ROW__ID.writeId &gt; 1 GROUP BY 
b, a
  * ) sub
  * GROUP BY b, a
  */
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveJoinInsertDeleteIncrementalRewritingRule.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveJoinInsertDeleteIncrementalRewritingRule.java
index 16b3ea19f2..57274e55d8 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveJoinInsertDeleteIncrementalRewritingRule.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveJoinInsertDeleteIncrementalRewritingRule.java
@@ -44,11 +44,11 @@ import java.util.List;
  * MULTI INSERT statement instead: one insert branch for inserted rows
  * and another for inserting deleted rows to delete delta.
  * Since CBO plan does not contain the INSERT branches we focus on the SELECT 
part of the plan in this rule.
- * See also {@link 
CalcitePlanner#fixUpASTJoinInsertDeleteIncrementalRebuild(ASTNode)}
+ * See also {@link CalcitePlanner}
  *
  * FROM (select mv.ROW__ID, mv.a, mv.b from mv) mv
  * RIGHT OUTER JOIN (SELECT _source_.ROW__IS_DELETED,_source_.a, _source_.b 
FROM _source_) source
- * ON (mv.a <=> source.a AND mv.b <=> source.b)
+ * ON (mv.a &lt;=&gt; source.a AND mv.b &lt;=&gt; source.b)
  * INSERT INTO TABLE mv_delete_delta
  *   SELECT mv.ROW__ID
  *   WHERE source.ROW__IS__DELETED
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index 87599815e5..89d6024cc0 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -532,9 +532,9 @@ public class RexNodeConverter {
    * </pre>
    * Or:
    * <pre>
-   * (c,d) IN ( (v1,v2), (v3,v4), ...) =&gt; (c=v1 && d=v2) || (c=v3 && d=v4) 
|| ...
+   * (c,d) IN ( (v1,v2), (v3,v4), ...) =&gt; (c=v1 &amp;&amp; d=v2) || (c=v3 
&amp;&amp; d=v4) || ...
    * Input: ((c,d), (v1,v2), (v3,v4), ...)
-   * Output: (c=v1 && d=v2, c=v3 && d=v4, ...)
+   * Output: (c=v1 &amp;&amp; d=v2, c=v3 &amp;&amp; d=v4, ...)
    * </pre>
    *
    * Returns null if the transformation fails, e.g., when non-deterministic
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/topnkey/CommonKeyPrefix.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/topnkey/CommonKeyPrefix.java
index 049466050b..f1b6da9568 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/topnkey/CommonKeyPrefix.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/topnkey/CommonKeyPrefix.java
@@ -78,10 +78,10 @@ public final class CommonKeyPrefix {
    *
    *      opKeys: Column[_col0], Column[_col1], Column[_col2], Column[_col3]
    *      parentKeys: Column[KEY._col0], Column[KEY._col1], Column[KEY._col4]
-   *      parentColExprMap: {_col0 -> Column[KEY._col0]}, {_col1 -> 
Column[KEY._col1]}, {_col4 -> Column[KEY._col4]}
+   *      parentColExprMap: {_col0 -&gt; Column[KEY._col0]}, {_col1 -&gt; 
Column[KEY._col1]}, {_col4 -&gt; Column[KEY._col4]}
    *
    * Column ordering and null ordering is given by a string where each 
character represents a column order/null order.
-   * Ex.: a ASC NULLS FIRST, b DESC NULLS LAST, c ASC NULLS LAST -> 
order="+-+", null order="azz"
+   * Ex.: a ASC NULLS FIRST, b DESC NULLS LAST, c ASC NULLS LAST -&gt; 
order="+-+", null order="azz"
    *
    * When <code>parentColExprMap</code> is null this method falls back to
    * {@link #map(List, String, String, List, String, String)}.
@@ -90,7 +90,7 @@ public final class CommonKeyPrefix {
    * @param opOrder operator's key column ordering in {@link String} format
    * @param opNullOrder operator's key column null ordering in {@link String} 
format
    * @param parentKeys {@link List} of {@link ExprNodeDesc}. contains the 
parent operator's key columns
-   * @param parentColExprMap {@link Map} of {@link String} -> {@link 
ExprNodeDesc}.
+   * @param parentColExprMap {@link Map} of {@link String} -&gt; {@link 
ExprNodeDesc}.
    *                                    contains parent operator's key column 
name {@link ExprNodeDesc} mapping
    * @param parentOrder parent operator's key column ordering in {@link 
String} format
    * @param parentNullOrder parent operator's key column null ordering in 
{@link String} format
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/topnkey/TopNKeyProcessor.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/topnkey/TopNKeyProcessor.java
index 7aa2a54ecd..47fb9cb770 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/topnkey/TopNKeyProcessor.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/topnkey/TopNKeyProcessor.java
@@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * TopNKeyProcessor is a processor for TopNKeyOperator.
- * A TopNKeyOperator will be placed before any ReduceSinkOperator which has a 
topN property >= 0.
+ * A TopNKeyOperator will be placed before any ReduceSinkOperator which has a 
topN property &gt;= 0.
  */
 public class TopNKeyProcessor implements SemanticNodeProcessor {
   private static final Logger LOG = 
LoggerFactory.getLogger(TopNKeyProcessor.class);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 59a1fdf347..6ba9a9075f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -2405,7 +2405,6 @@ public class CalcitePlanner extends SemanticAnalyzer {
      * @param isCollection
      * @param order
      * @param rules
-     * @return HEP program
      */
     protected void generatePartialProgram(HepProgramBuilder programBuilder, 
boolean isCollection, HepMatchOrder order,
         RelOptRule... rules) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
index 0e0b8d6db3..8c5cd852b3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/UnparseTranslator.java
@@ -137,7 +137,7 @@ public class UnparseTranslator {
   /**
    * Register a translation for an tabName.
    *
-   * @param node
+   * @param tableName
    *          source node (which must be an tabName) to be replaced
    */
   public void addTableNameTranslation(ASTNode tableName, String 
currentDatabaseName) {
@@ -175,7 +175,7 @@ public class UnparseTranslator {
   /**
    * Register a translation for an identifier.
    *
-   * @param node
+   * @param identifier
    *          source node (which must be an identifier) to be replaced
    */
   public void addIdentifierTranslation(ASTNode identifier) {
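
doclint checks every @param tag against the method's actual parameter list,
so a tag naming a nonexistent "node" becomes an error once the parameter is
called tableName. A simplified, hypothetical signature showing the rule:

    public class TranslatorDoc {
      /**
       * Registers a translation for a table name.
       *
       * @param tableName source node to be replaced
       * @param currentDatabaseName database used to qualify the name
       */
      public void addTableNameTranslation(String tableName, String currentDatabaseName) {
      }
    }
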
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/type/FunctionHelper.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/type/FunctionHelper.java
index 3797786109..d4e1dbcdac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/type/FunctionHelper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/type/FunctionHelper.java
@@ -98,7 +98,7 @@ public interface FunctionHelper {
   boolean isInFunction(FunctionInfo fi);
 
   /**
-   * returns true if FunctionInfo is a compare function (e.g. '<=')
+   * returns true if FunctionInfo is a compare function (e.g. '&lt;=')
    */
   boolean isCompareFunction(FunctionInfo fi);
 
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
index 796ef72c19..167b2728f5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
@@ -106,7 +106,7 @@ public abstract class CompactorThread extends Thread 
implements Configurable {
    * Get the partition being compacted.
    * @param ci compaction info returned from the compaction queue
    * @return metastore partition, or null if there is not partition in this 
compaction info
-   * @throws Exception if underlying calls throw, or if the partition name 
resolves to more than
+   * @throws MetaException if underlying calls throw, or if the partition name 
resolves to more than
    * one partition.
    */
   protected Partition resolvePartition(CompactionInfo ci) throws MetaException 
{
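
Similarly, doclint flags @throws tags that document a checked exception the
method does not actually declare, so the generic Exception here had to become
the declared MetaException. A standalone sketch using a JDK exception:

    public class ResolveDoc {
      /**
       * Resolves the partition being compacted.
       *
       * @throws java.io.IOException if the underlying lookup fails
       */
      public void resolve() throws java.io.IOException {
      }
    }
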
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRank.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRank.java
index 644e7283c0..a6eb3752b4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRank.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRank.java
@@ -274,7 +274,7 @@ public class GenericUDAFRank extends 
AbstractGenericUDAFResolver {
    *  Calculates the rank of a hypothetical row specified by the arguments of 
the
    *  function in a group of values specified by the order by clause.
    *  SELECT rank(expression1[, expressionn]*) WITHIN GROUP (ORDER BY col1[, 
coln]*)
-   *  (the number of rows where col1 < expression1 [and coln < expressionn]*) 
+ 1
+   *  (the number of rows where col1 &lt; expression1 [and coln &lt; 
expressionn]*) + 1
    */
   public static class GenericUDAFHypotheticalSetRankEvaluator extends 
GenericUDAFEvaluator {
     public static final String RANK_FIELD = "rank";
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCastFormat.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCastFormat.java
index 6e24f9cf0a..36fdae15e7 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCastFormat.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCastFormat.java
@@ -53,7 +53,7 @@ import java.io.Serializable;
 import java.util.Map;
 
 /**
- * CAST(<value> AS <type> FORMAT <STRING>).
+ * CAST(&lt;value&gt; AS &lt;type&gt; FORMAT &lt;STRING&gt;).
  *
  * Vector expressions: CastDateToCharWithFormat, CastDateToStringWithFormat,
  *     CastDateToVarCharWithFormat, CastTimestampToCharWithFormat,
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/JsonSerDe.java 
b/serde/src/java/org/apache/hadoop/hive/serde2/JsonSerDe.java
index 14d5c6ea78..534246e5f3 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/JsonSerDe.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/JsonSerDe.java
@@ -41,10 +41,10 @@ import org.apache.hive.common.util.TimestampParser;
 /**
  * Hive SerDe for processing JSON formatted data. This is typically paired with
  * the TextInputFormat and therefore each line provided to this SerDe must be a
- * single, and complete JSON object.<br/>
+ * single, and complete JSON object.<br>
  * <h2>Example</h2>
  * <p>
- * {"name="john","age"=30}<br/>
+ * {"name="john","age"=30}<br>
  * {"name="sue","age"=32}
  * </p>
  */
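
Under the default HTML 4.01 doclint mode of JDK 8+, self-closing syntax such
as <br/> is reported as "self-closing element not allowed"; void elements are
written plainly. For illustration (hypothetical class):

    /**
     * First JSON record.<br>
     * Second JSON record.
     */
    public class LineBreakDoc {
    }
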
diff --git 
a/serde/src/java/org/apache/hadoop/hive/serde2/json/BinaryEncoding.java 
b/serde/src/java/org/apache/hadoop/hive/serde2/json/BinaryEncoding.java
index 02f26b0d74..255562a8d2 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/json/BinaryEncoding.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/json/BinaryEncoding.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hive.serde2.json;
 
 /**
- * Enums describing the available String->Bytes encoding available for JSON
+ * Enums describing the available String-&gt;Bytes encoding available for JSON
  * parsing. This base-64 variant is what most people would think of "the
  * standard" Base64 encoding for JSON: the specific MIME content transfer
  * encoding. The Raw String encoding produces an array of bytes by reading the
diff --git 
a/serde/src/java/org/apache/hadoop/hive/serde2/json/HiveJsonReader.java 
b/serde/src/java/org/apache/hadoop/hive/serde2/json/HiveJsonReader.java
index 6e9bb4e226..7ade47b6e5 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/json/HiveJsonReader.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/json/HiveJsonReader.java
@@ -71,9 +71,9 @@ import com.google.common.base.Preconditions;
 /**
  * This class converts JSON strings into Java or Hive Primitive objects.
  *
- * Support types are:<br/>
- * <br/>
- * <table border="1">
+ * Support types are:<br>
+ * <br>
+ * <table border="1" summary="">
  * <tr>
  * <th>JSON Type</th>
  * <th>Java Type</th>
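
The added summary="" satisfies the HTML 4.01 doclint check that every <table>
carries a summary attribute or a <caption>. A fuller hypothetical variant
would describe the table contents:

    /**
     * <table border="1" summary="JSON type to Java type mapping">
     * <tr><th>JSON Type</th><th>Java Type</th></tr>
     * <tr><td>string</td><td>java.lang.String</td></tr>
     * </table>
     */
    public class TypeTableDoc {
    }
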
diff --git 
a/service/src/java/org/apache/hive/service/cli/operation/QueryInfoCache.java 
b/service/src/java/org/apache/hive/service/cli/operation/QueryInfoCache.java
index 59b5cd029b..6dff3477b7 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/QueryInfoCache.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/QueryInfoCache.java
@@ -78,7 +78,7 @@ public class QueryInfoCache {
   }
 
   /**
-   * Remove the live operation's query info from the {@liveQueryInfos},
+   * Remove the live operation's query info from the {@link #liveQueryInfos},
    * and push the query info to the historic query cache if enabled.
    * @param operation the to remove operation
    */
diff --git 
a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java 
b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
index 242990abf0..5696393d6c 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
@@ -259,8 +259,6 @@ public interface HadoopShims {
    * @param filter A filter to use on the files in the directory
    * @return A list of file status with IDs
    * @throws IOException An I/O exception of some sort has occurred
-   * @throws FileNotFoundException If the path is not found in the
-   *           {@code FileSystem}
    * @throws UnsupportedOperationException the {@code FileSystem} is not a
    *           {@code DistributedFileSystem}
    */
diff --git 
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
 
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 5e8a81f466..81f6083ce4 100644
--- 
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ 
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -4748,7 +4748,7 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient, AutoCloseable {
    * if the validWriteIdList is not explicitly passed (as a method argument) 
to the HMS APIs.
    * This method returns the ValidWriteIdList based on the 
VALID_TABLES_WRITEIDS_KEY key.
    * Since, VALID_TABLES_WRITEIDS_KEY is set during the lock acquisition phase 
after query compilation
-   * ( DriverTxnHandler.acquireLocks -> recordValidWriteIds -> 
setValidWriteIds ),
+   * ( DriverTxnHandler.acquireLocks -&gt; recordValidWriteIds -&gt; 
setValidWriteIds ),
    * this only covers a subset of cases, where we invoke get_* APIs after 
query compilation,
    * if the validWriteIdList is not explicitly passed (as a method argument) 
to the HMS APIs.
    */
diff --git 
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
 
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 3056a598fb..eaa4542de4 100644
--- 
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ 
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -252,9 +252,7 @@ public interface IMetaStoreClient {
    * @param dbName Name of the database to fetch tables from.
    * @param tablePattern pattern to match the tables names.
    * @param requestedFields An int bitmask to indicate the depth of the 
returned objects
-   * @param processorCapabilities A list of "capabilities" possessed by the 
caller, to be matched with table's params
-   * @param processorId Any string id to identify the caller/client, for 
logging purposes only.
-   * @param limit Maximum size of the result set. <=0 indicates no limit
+   * @param limit Maximum size of the result set. &lt;=0 indicates no limit
    * @return List of ExtendedTableInfo that match the input arguments.
    * @throws MetaException Thrown if there is error on fetching from DBMS.
    * @throws TException Thrown if there is a thrift transport exception.
@@ -1689,9 +1687,7 @@ public interface IMetaStoreClient {
    * @throws MetaException
    * @throws NoSuchObjectException
    * @throws TException
-   * @see 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.CreateTableRequest)
    */
-
   void createTable(Table tbl) throws AlreadyExistsException,
       InvalidObjectException, MetaException, NoSuchObjectException, TException;
 
@@ -1703,7 +1699,6 @@ public interface IMetaStoreClient {
    * @throws NoSuchObjectException
    * @throws TException
    */
-
   void createTable(CreateTableRequest request) throws AlreadyExistsException,
           InvalidObjectException, MetaException, NoSuchObjectException, 
TException;
 
@@ -2583,7 +2578,7 @@ public interface IMetaStoreClient {
   /**
    * Get the column statistics for a set of columns in a table.  This should 
only be used for
    * non-partitioned tables.  For partitioned tables use
-   * {@link #getPartitionColumnStatistics(String, String, List, List)}.
+   * {@link #getPartitionColumnStatistics(String, String, List, List, String)}.
    * @param dbName database name
    * @param tableName table name
    * @param colNames list of column names
@@ -2603,7 +2598,7 @@ public interface IMetaStoreClient {
   /**
    * Get the column statistics for a set of columns in a table.  This should 
only be used for
    * non-partitioned tables.  For partitioned tables use
-   * {@link #getPartitionColumnStatistics(String, String, String, List, List)}.
+   * {@link #getPartitionColumnStatistics(String, String, String, List, List, 
String)}.
    * @param catName catalog name
    * @param dbName database name
    * @param tableName table name
diff --git 
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
 
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
index b8ab28b45c..cf8825ec16 100644
--- 
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
+++ 
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
@@ -353,7 +353,6 @@ public class FileUtils {
    * @param fs
    *          the file system
    * @return array of FileStatus
-   * @throws IOException
    */
   public static List<FileStatus> getFileStatusRecurse(Path base, FileSystem 
fs) {
     try {
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ExceptionHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ExceptionHandler.java
index ffac60831e..4e4950f99f 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ExceptionHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ExceptionHandler.java
@@ -81,8 +81,8 @@ public final class ExceptionHandler {
   }
 
   /**
-   * Converts the input exception to the target instance of class {@param 
target},
-   * if the input exception is the instance of class {@param source}, throws 
the
+   * Converts the input exception to the target instance of class {@code 
target},
+   * if the input exception is the instance of class {@code source}, throws the
    * converted target exception.
    */
   public <S extends Exception, T extends TException> ExceptionHandler
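
{@param ...} mixes the @param block tag with inline-tag syntax and is not
valid javadoc; parameter names mentioned in running text are conventionally
wrapped in {@code}. An illustrative sketch:

    public class ConvertDoc {
      /**
       * Converts the exception to {@code target} when it is an instance of
       * {@code source}, then rethrows the converted exception.
       */
      public void convert(Class<?> source, Class<?> target) {
      }
    }
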
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreMetadataTransformer.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreMetadataTransformer.java
index 6f0bf91df2..4360f09dfa 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreMetadataTransformer.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreMetadataTransformer.java
@@ -41,7 +41,7 @@ public interface IMetaStoreMetadataTransformer {
   * @param processorCapabilities A array of String capabilities received from 
the data processor
   * @param processorId String ID used for logging purpose.
   * @return Map A Map of transformed objects keyed by Table and value is list 
of required capabilities
-  * @throws HiveMetaException
+  * @throws MetaException
   */
   // TODO HiveMetaException or MetaException
   public Map<Table, List<String>> transform(List<Table> tables, List<String> 
processorCapabilities,
@@ -53,7 +53,7 @@ public interface IMetaStoreMetadataTransformer {
   * @param processorCapabilities A array of String capabilities received from 
the data processor
   * @param processorId String ID used for logging purpose.
   * @return Map A Map of transformed objects keyed by Partition and value is 
list of required capabilities
-  * @throws HiveMetaException
+  * @throws MetaException
   */
   // TODO HiveMetaException or MetaException
   public List<Partition> transformPartitions(List<Partition> parts, Table 
table, List<String> processorCapabilities,
@@ -64,7 +64,7 @@ public interface IMetaStoreMetadataTransformer {
   * @param processorCapabilities A array of String capabilities received from 
the data processor
   * @param processorId String ID used for logging purpose.
   * @return Table An altered Table based on the processor capabilities
-  * @throws HiveMetaException
+  * @throws MetaException
   */
  public Table transformCreateTable(Table table, List<String> 
processorCapabilities,
      String processorId) throws MetaException;
@@ -74,17 +74,17 @@ public interface IMetaStoreMetadataTransformer {
   * @param processorCapabilities A array of String capabilities received from 
the data processor
   * @param processorId String ID used for logging purpose.
   * @return Database An altered Database based on the processor capabilities
-  * @throws HiveMetaException
+  * @throws MetaException
   */
  public Database transformDatabase(Database db, List<String> 
processorCapabilities,
      String processorId) throws MetaException;
 
   /**
-  * @param table A table object to be transformed prior to the alteration of a 
table
+  * @param oldTable A table object to be transformed prior to the alteration 
of a table
   * @param processorCapabilities A array of String capabilities received from 
the data processor
   * @param processorId String ID used for logging purpose.
   * @return Table An altered Table based on the processor capabilities
-  * @throws HiveMetaException
+  * @throws MetaException
   */
   public Table transformAlterTable(Table oldTable, Table newTable, 
List<String> processorCapabilities,
      String processorId) throws MetaException;
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
index feb171a384..800ea03b81 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
@@ -343,7 +343,7 @@ public abstract class MetaStoreEventListener implements 
Configurable {
 
   /**
    * This will be called to perform acid write operation in a batch.
-   * @param acidWriteEvent event to be processed
+   * @param batchAcidWriteEvent event to be processed
    * @param dbConn jdbc connection to remote meta store db.
    * @param sqlGenerator helper class to generate db specific sql string.
    * @throws MetaException
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index e886098d45..fac5b836e2 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -290,7 +290,7 @@ public interface RawStore extends Configurable {
    * Get the dataconnector with a given name, if exists.
    * @param dcName pattern names should match
    * @return DataConnector object.
-   * @throws MetaException something went wrong, usually with the database.
+   * @throws NoSuchObjectException something went wrong, usually with the 
database.
    */
   DataConnector getDataConnector(String dcName) throws NoSuchObjectException;
 
@@ -542,7 +542,6 @@ public interface RawStore extends Configurable {
    * Retrieve all materialized views.
    * @return all materialized views in a catalog
    * @throws MetaException error querying the RDBMS
-   * @throws NoSuchObjectException no such database
    */
   List<Table> getAllMaterializedViewObjectsForRewriting(String catName)
       throws MetaException;
@@ -1638,7 +1637,7 @@ public interface RawStore extends Configurable {
   /**
    * SQLPrimaryKey represents a single primary key column.
    * Since a table can have one or more primary keys ( in case of composite 
primary key ),
-   * this method returns List<SQLPrimaryKey>
+   * this method returns List&lt;SQLPrimaryKey&gt;
    * @param request primary key request
    * @return list of primary key columns or an empty list if the table does 
not have a primary key
    * @throws MetaException error accessing the RDBMS
@@ -1667,7 +1666,7 @@ public interface RawStore extends Configurable {
   /**
    * SQLForeignKey represents a single foreign key column.
    * Since a table can have one or more foreign keys ( in case of composite 
foreign key ),
-   * this method returns List<SQLForeignKey>
+   * this method returns List&lt;SQLForeignKey&gt;
    * @param request ForeignKeysRequest object
    * @return List of all matching foreign key columns.  Note that if more than 
one foreign key
    * matches the arguments the results here will be all mixed together into a 
single list.
@@ -1691,7 +1690,7 @@ public interface RawStore extends Configurable {
   /**
    * SQLUniqueConstraint represents a single unique constraint column.
    * Since a table can have one or more unique constraint ( in case of 
composite unique constraint ),
-   * this method returns List<SQLUniqueConstraint>
+   * this method returns List&lt;SQLUniqueConstraint&gt;
    * @param request UniqueConstraintsRequest object.
    * @return list of unique constraints
    * @throws MetaException error access the RDBMS.
@@ -1713,7 +1712,7 @@ public interface RawStore extends Configurable {
   /**
    * SQLNotNullConstraint represents a single not null constraint column.
    * Since a table can have one or more not null constraint ( in case of 
composite not null constraint ),
-   * this method returns List<SQLNotNullConstraint>
+   * this method returns List&lt;SQLNotNullConstraint&gt;
    * @param request NotNullConstraintsRequest object.
    * @return list of not null constraints
    * @throws MetaException error accessing the RDBMS.
@@ -1735,7 +1734,7 @@ public interface RawStore extends Configurable {
   /**
    * SQLDefaultConstraint represents a single default constraint column.
    * Since a table can have one or more default constraint ( in case of 
composite default constraint ),
-   * this method returns List<SQLDefaultConstraint>
+   * this method returns List&lt;SQLDefaultConstraint&gt;
    * @param request DefaultConstraintsRequest object.
    * @return list of default values defined on the table.
    * @throws MetaException error accessing the RDBMS
@@ -1757,7 +1756,7 @@ public interface RawStore extends Configurable {
   /**
    * SQLCheckConstraint represents a single check constraint column.
    * Since a table can have one or more check constraint ( in case of 
composite check constraint ),
-   * this method returns List<SQLCheckConstraint>
+   * this method returns List&lt;SQLCheckConstraint&gt;
    * @param request CheckConstraintsRequest object.
    * @return ccheck constraints for this table
    * @throws MetaException error accessing the RDBMS
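
Generic types in running javadoc text hit the same HTML rule, because
List<SQLPrimaryKey> parses as an unknown <SQLPrimaryKey> tag. The entities
used here work; {@code List<SQLPrimaryKey>} is an equally valid spelling
that needs no escaping. Hypothetical sketch:

    import java.util.Collections;
    import java.util.List;

    public class KeysDoc {
      /**
       * Returns the key columns as {@code List<SQLPrimaryKey>}; a composite
       * key yields more than one element.
       */
      public List<String> primaryKeys() {
        return Collections.emptyList();
      }
    }
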
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/DerbySQLConnectorProvider.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/DerbySQLConnectorProvider.java
index a6d53752b3..5a8dac3524 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/DerbySQLConnectorProvider.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/dataconnector/jdbc/DerbySQLConnectorProvider.java
@@ -43,7 +43,7 @@ public class DerbySQLConnectorProvider extends 
AbstractJDBCConnectorProvider {
   /**
    * Returns a list of all table names from the remote database.
    * @return List A collection of all the table names, null if there are no 
tables.
-   * @throws IOException To indicate any failures with executing this API
+   * @throws MetaException To indicate any failures with executing this API
    */
   @Override
   protected ResultSet fetchTableNames() throws MetaException {
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
index 71df48993d..9d2ebc7b09 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
@@ -746,7 +746,7 @@ public interface TxnStore extends Configurable {
 
   /**
    * Returns the top ACID metrics from each type {@link 
CompactionMetricsData.MetricType}
-   * @oaram limit number of returned records for each type
+   * @param limit number of returned records for each type
    * @return list of metrics, always non-null
    * @throws MetaException
    */
