This is an automated email from the ASF dual-hosted git repository.

kgyrtkirk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git

commit 2fcde8ef6adf75f7ab4684629a8cf807f054e243
Author: Robert Kucsora <kucsora.rob...@gmail.com>
AuthorDate: Wed Apr 3 12:16:36 2019 +0200

    HIVE-16815: Clean up javadoc from error for the rest of modules (Robert Kucsora via Zoltan Haindrich)
    
    Signed-off-by: Zoltan Haindrich <k...@rxd.hu>
---
 .../predicate/AccumuloPredicateHandler.java        |  4 +--
 .../accumulo/serde/AccumuloCompositeRowId.java     | 10 +++---
 .../java/org/apache/hive/http/ProfileServlet.java  |  4 +--
 .../apache/hadoop/hive/contrib/mr/GenericMR.java   |  2 +-
 .../hadoop/hive/contrib/serde2/RegexSerDe.java     |  4 +--
 .../hive/contrib/udaf/example/UDAFExampleAvg.java  |  2 +-
 .../hadoop/hive/hbase/HBaseCompositeKey.java       |  9 +++--
 .../org/apache/hadoop/hive/hbase/HBaseSerDe.java   |  2 +-
 .../hadoop/hive/hbase/struct/HBaseStructValue.java |  9 +++--
 .../hive/hbase/struct/HBaseValueFactory.java       |  4 +--
 .../apache/hive/hcatalog/common/HCatConstants.java |  2 +-
 .../org/apache/hive/hcatalog/common/HCatUtil.java  | 14 ++++----
 .../hive/hcatalog/data/transfer/HCatWriter.java    |  6 ++--
 .../hcatalog/listener/NotificationListener.java    |  4 +--
 .../hcatalog/streaming/AbstractRecordWriter.java   |  2 +-
 .../hcatalog/streaming/DelimitedInputWriter.java   |  4 +--
 .../hive/hcatalog/streaming/HiveEndPoint.java      |  2 +-
 .../hive/hcatalog/streaming/RecordWriter.java      |  2 +-
 .../hcatalog/streaming/StreamingConnection.java    |  2 +-
 .../hive/hcatalog/streaming/StrictJsonWriter.java  |  2 +-
 .../hive/hcatalog/streaming/StrictRegexWriter.java |  2 +-
 .../hive/hcatalog/streaming/TransactionBatch.java  |  2 +-
 .../mutate/worker/MutatorCoordinator.java          |  2 +-
 .../org/apache/hive/hcatalog/api/HCatClient.java   | 27 +++++++-------
 .../hive/hcatalog/api/repl/ReplicationTask.java    | 12 +++----
 .../src/main/java/org/apache/hive/hplsql/Meta.java |  2 +-
 .../main/java/org/apache/hive/hplsql/Utils.java    |  4 +--
 .../src/main/java/org/apache/hive/hplsql/Var.java  |  2 +-
 jdbc/src/java/org/apache/hive/jdbc/Utils.java      |  4 +--
 .../java/org/apache/hadoop/hive/llap/LlapDump.java |  3 +-
 .../apache/hadoop/hive/llap/ConsumerFeedback.java  |  2 +-
 .../hadoop/hive/llap/cache/LowLevelCache.java      |  2 +-
 .../llap/daemon/impl/PriorityBlockingDeque.java    | 42 +++++++++++-----------
 .../hive/llap/daemon/impl/TaskExecutorService.java |  6 ++--
 .../hive/llap/daemon/impl/TaskRunnerCallable.java  |  2 +-
 .../hive/llap/metrics/LlapDaemonJvmInfo.java       |  2 +-
 .../org/apache/hadoop/hive/ql/plan/DDLWork.java    |  4 ---
 .../teradata/TeradataBinaryDataInputStream.java    |  6 ++--
 .../teradata/TeradataBinaryDataOutputStream.java   |  6 ++--
 .../src/java/org/apache/hive/service/Service.java  |  2 +-
 .../org/apache/hive/service/ServiceOperations.java | 10 +++---
 .../apache/hive/service/auth/HttpAuthUtils.java    |  2 +-
 .../service/auth/PasswdAuthenticationProvider.java |  2 +-
 .../hive/service/auth/TSetIpAddressProcessor.java  |  4 +--
 .../auth/ldap/CustomQueryFilterFactory.java        |  2 +-
 .../hive/service/auth/ldap/GroupFilterFactory.java |  2 +-
 .../apache/hive/service/auth/ldap/LdapUtils.java   | 12 +++----
 .../service/auth/ldap/SearchResultHandler.java     |  2 +-
 .../hive/service/auth/ldap/UserFilterFactory.java  |  2 +-
 .../apache/hive/service/cli/CLIServiceUtils.java   |  2 +-
 .../cli/operation/ClassicTableTypeMapping.java     |  6 ++--
 .../service/cli/operation/TableTypeMapping.java    |  2 +-
 .../server/ThreadFactoryWithGarbageCleanup.java    |  6 ++--
 .../apache/hadoop/hive/metastore/HiveMetaHook.java |  2 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java    |  7 ++--
 .../apache/hadoop/hive/metastore/Warehouse.java    |  2 +-
 .../hadoop/hive/metastore/utils/FilterUtils.java   |  1 -
 .../apache/hadoop/hive/metastore/AlterHandler.java | 10 +++---
 .../MaterializationsRebuildLockHandler.java        |  9 ++---
 .../hadoop/hive/metastore/PartitionIterable.java   |  6 ++--
 .../org/apache/hadoop/hive/metastore/RawStore.java | 10 ++----
 .../hadoop/hive/metastore/events/OpenTxnEvent.java |  2 +-
 .../hadoop/hive/metastore/txn/CompactionInfo.java  |  2 +-
 .../apache/hadoop/hive/metastore/txn/TxnUtils.java |  6 ++--
 .../hadoop/hive/common/io/FileMetadataCache.java   |  2 --
 .../hive/streaming/HiveStreamingConnection.java    |  4 +--
 .../hive/streaming/StreamingTransaction.java       |  2 +-
 .../hive/testutils/jdbc/HiveBurnInClient.java      |  2 +-
 68 files changed, 165 insertions(+), 180 deletions(-)

diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
index 0774d84..834ed25 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
@@ -134,7 +134,7 @@ public class AccumuloPredicateHandler {
    *
    * @param udfType
    *          GenericUDF classname to lookup matching CompareOpt
-   * @return Class<? extends CompareOpt/>
+   * @return Class&lt;? extends CompareOpt/&gt;
    */
   public Class<? extends CompareOp> getCompareOpClass(String udfType)
       throws NoSuchCompareOpException {
@@ -166,7 +166,7 @@ public class AccumuloPredicateHandler {
    *
    * @param type
    *          String hive column lookup matching PrimitiveCompare
-   * @return Class<? extends ></?>
+   * @return Class&lt;? extends &gt;&lt;/?&gt;
    */
   public Class<? extends PrimitiveComparison> getPrimitiveComparisonClass(String type)
       throws NoSuchPrimitiveComparisonException {
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloCompositeRowId.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloCompositeRowId.java
index 5c6497e..81da305 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloCompositeRowId.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloCompositeRowId.java
@@ -33,21 +33,21 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField;
  * AccumuloCompositeKey extension of LazyStruct. All complex composite keys should extend this class
  * and override the {@link LazyStruct#getField(int)} method where fieldID corresponds to the ID of a
  * key in the composite key.
- * <p>
+ * <br>
  * For example, for a composite key <i>"/part1/part2/part3"</i>, <i>part1</i> will have an id
  * <i>0</i>, <i>part2</i> will have an id <i>1</i> and <i>part3</i> will have an id <i>2</i>. Custom
  * implementations of getField(fieldID) should return the value corresponding to that fieldID. So,
- * for the above example, the value returned for <i>getField(0)</i> should be </i>part1</i>,
+ * for the above example, the value returned for <i>getField(0)</i> should be <i>part1</i>,
  * <i>getField(1)</i> should be <i>part2</i> and <i>getField(2)</i> should be <i>part3</i>.
- * </p>
+ * <br>
  *
- * <p>
+ * <br>
  * All custom implementation are expected to have a constructor of the form:
  *
  * <pre>
  * MyCustomCompositeKey(LazySimpleStructObjectInspector oi, Properties tbl, Configuration conf)
  * </pre>
- * </p>
+ * <br>
  *
  */
 public class AccumuloCompositeRowId extends LazyStruct {
diff --git a/common/src/java/org/apache/hive/http/ProfileServlet.java b/common/src/java/org/apache/hive/http/ProfileServlet.java
index 4843756..e2a009f 100644
--- a/common/src/java/org/apache/hive/http/ProfileServlet.java
+++ b/common/src/java/org/apache/hive/http/ProfileServlet.java
@@ -38,7 +38,7 @@ import com.google.common.base.Joiner;
  * Servlet that runs async-profiler as web-endpoint.
  * Following options from async-profiler can be specified as query paramater.
  * //  -e event          profiling event: cpu|alloc|lock|cache-misses etc.
- * //  -d duration       run profiling for <duration> seconds (integer)
+ * //  -d duration       run profiling for &lt;duration&gt; seconds (integer)
  * //  -i interval       sampling interval in nanoseconds (long)
  * //  -j jstackdepth    maximum Java stack depth (integer)
  * //  -b bufsize        frame buffer size (long)
@@ -53,7 +53,7 @@ import com.google.common.base.Joiner;
  * - To collect 30 second CPU profile of current process (returns FlameGraph svg)
  * curl "http://localhost:10002/prof";
  * - To collect 1 minute CPU profile of current process and output in tree format (html)
- * curl "http://localhost:10002/prof?output=tree&duration=60";
+ * curl "http://localhost:10002/prof?output=tree&amp;duration=60";
  * - To collect 30 second heap allocation profile of current process (returns FlameGraph svg)
  * curl "http://localhost:10002/prof?event=alloc";
  * - To collect lock contention profile of current process (returns FlameGraph svg)
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java b/contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java
index 5f54d5c..a8c6493 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java
@@ -41,7 +41,7 @@ import java.util.NoSuchElementException;
  * As an example, here's the wordcount reduce:
  * 
  * new GenericMR().reduce(System.in, System.out, new Reducer() { public void
- * reduce(String key, Iterator<String[]> records, Output output) throws
+ * reduce(String key, Iterator&lt;String[]&gt; records, Output output) throws
  * Exception { int count = 0;
  * 
  * while (records.hasNext()) { count += Integer.parseInt(records.next()[1]); }
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/serde2/RegexSerDe.java b/contrib/src/java/org/apache/hadoop/hive/contrib/serde2/RegexSerDe.java
index f7c46f6..45f32dd 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/serde2/RegexSerDe.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/serde2/RegexSerDe.java
@@ -61,8 +61,8 @@ import org.apache.hadoop.io.Writable;
  * into a row. If the output type of the column in a query is not a string, it
  * will be automatically converted to String by Hive.
  *
- * For the format of the format String, please refer to {@link http
- * ://java.sun.com/j2se/1.5.0/docs/api/java/util/Formatter.html#syntax}
+ * For the format of the format String, please refer to link: http
+ * ://java.sun.com/j2se/1.5.0/docs/api/java/util/Formatter.html#syntax
  *
  * NOTE: Obviously, all columns have to be strings. Users can use
  * "CAST(a AS INT)" to convert columns to other types.
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleAvg.java b/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleAvg.java
index ddd5f99..03b7ed1 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleAvg.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleAvg.java
@@ -44,7 +44,7 @@ public final class UDAFExampleAvg extends UDAF {
    * by a primitive.
    * 
    * The internal state can also contains fields with types like
-   * ArrayList<String> and HashMap<String,Double> if needed.
+   * ArrayList&lt;String&gt; and HashMap&lt;String,Double&gt; if needed.
    */
   public static class UDAFAvgState {
     private long mCount;
diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseCompositeKey.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseCompositeKey.java
index b425099..4b558cd 100644
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseCompositeKey.java
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseCompositeKey.java
@@ -35,22 +35,21 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField;
  * HBaseCompositeKey extension of LazyStruct. All complex composite keys should extend this class
  * and override the {@link LazyStruct#getField(int)} method where fieldID corresponds to the ID of a
  * key in the composite key.
- * <p>
+ * <br>
  * For example, for a composite key <i>"/part1/part2/part3"</i>, <i>part1</i> will have an id
  * <i>0</i>, <i>part2</i> will have an id <i>1</i> and <i>part3</i> will have an id <i>2</i>. Custom
  * implementations of getField(fieldID) should return the value corresponding to that fieldID. So,
- * for the above example, the value returned for <i>getField(0)</i> should be </i>part1</i>,
+ * for the above example, the value returned for <i>getField(0)</i> should be <i>part1</i>,
  * <i>getField(1)</i> should be <i>part2</i> and <i>getField(2)</i> should be <i>part3</i>.
- * </p>
  * 
- * <p>
+ *
+ * <br>
  * All custom implementation are expected to have a constructor of the form:
  * 
  * <pre>
  * MyCustomCompositeKey(LazySimpleStructObjectInspector oi, Properties tbl, Configuration conf)
  * </pre>
  *
- * </p>
  * 
  * */
 public class HBaseCompositeKey extends LazyStruct {
diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDe.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDe.java
index 1553525..1588283 100644
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDe.java
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDe.java
@@ -154,7 +154,7 @@ public class HBaseSerDe extends AbstractSerDe {
   * @param columnsMappingSpec string hbase.columns.mapping specified when creating table
   * @param doColumnRegexMatching whether to do a regex matching on the columns or not
   * @param hideColumnPrefix whether to hide a prefix of column mapping in key name in a map (works only if @doColumnRegexMatching is true)
-   * @return List<ColumnMapping> which contains the column mapping information by position
+   * @return List&lt;ColumnMapping&gt; which contains the column mapping information by position
    * @throws org.apache.hadoop.hive.serde2.SerDeException
    */
   public static ColumnMappings parseColumnsMapping(
diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseStructValue.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseStructValue.java
index f56bfca..8b66a7a 100644
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseStructValue.java
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseStructValue.java
@@ -32,21 +32,20 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField;
  * This is an extension of LazyStruct. All value structs should extend this class and override the
  * {@link LazyStruct#getField(int)} method where fieldID corresponds to the ID of a value in the
  * value structure.
- * <p>
+ * <br>
  * For example, for a value structure <i>"/part1/part2/part3"</i>, <i>part1</i> will have an id
  * <i>0</i>, <i>part2</i> will have an id <i>1</i> and <i>part3</i> will have an id <i>2</i>. Custom
  * implementations of getField(fieldID) should return the value corresponding to that fieldID. So,
- * for the above example, the value returned for <i>getField(0)</i> should be </i>part1</i>,
+ * for the above example, the value returned for <i>getField(0)</i> should be <i>part1</i>,
  * <i>getField(1)</i> should be <i>part2</i> and <i>getField(2)</i> should be <i>part3</i>.
- * </p>
- * <p>
+ *
+ * <br>
  * All implementation are expected to have a constructor of the form <br>
  *
  * <pre>
  * MyCustomStructObject(LazySimpleStructObjectInspector oi, Properties props, Configuration conf, ColumnMapping colMap)
  * </pre>
  * 
- * </p>
  * */
 public class HBaseStructValue extends LazyStruct {
 
diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseValueFactory.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseValueFactory.java
index 027a0e5..b013a43 100644
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseValueFactory.java
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseValueFactory.java
@@ -38,7 +38,7 @@ public interface HBaseValueFactory {
   /**
    * Initialize factory with properties
    * 
-   * @param hbaseParam the {@link HBaseParameters hbase parameters}
+   * @param hbaseParam the HBaseParameters hbase parameters
    * @param conf the hadoop {@link Configuration configuration}
    * @param properties the custom {@link Properties}
    * @throws SerDeException if there was an issue initializing the factory
@@ -67,7 +67,7 @@ public interface HBaseValueFactory {
    * @param object the object to be serialized
    * @param field the {@link StructField}
    * @return the serialized value
-   * @throws {@link IOException} if there was an issue serializing the value
+   * @throws IOException if there was an issue serializing the value
    */
   byte[] serializeValue(Object object, StructField field) throws IOException;
 }
\ No newline at end of file
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java
index 09bb0d2..1e319b4 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java
@@ -186,7 +186,7 @@ public final class HCatConstants {
 
   /**
   * {@value} (default: {@value #HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER_DEFAULT}).
-   * Pig < 0.10.0 does not have boolean support, and scripts written for pre-boolean Pig versions
+   * Pig &lt; 0.10.0 does not have boolean support, and scripts written for pre-boolean Pig versions
   * will not expect boolean values when upgrading Pig. For integration the option is offered to
   * convert boolean fields to integers by setting this Hadoop configuration key.
    */
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
index 75d55af..e851d2a 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
@@ -300,11 +300,11 @@ public class HCatUtil {
    * Test if the first FsAction is more permissive than the second. This is
    * useful in cases where we want to ensure that a file owner has more
    * permissions than the group they belong to, for eg. More completely(but
-   * potentially more cryptically) owner-r >= group-r >= world-r : bitwise
-   * and-masked with 0444 => 444 >= 440 >= 400 >= 000 owner-w >= group-w >=
-   * world-w : bitwise and-masked with &0222 => 222 >= 220 >= 200 >= 000
-   * owner-x >= group-x >= world-x : bitwise and-masked with &0111 => 111 >=
-   * 110 >= 100 >= 000
+   * potentially more cryptically) owner-r &gt;= group-r &gt;= world-r : bitwise
+   * and-masked with 0444 =&gt; 444 &gt;= 440 &gt;= 400 &gt;= 000 owner-w &gt;= group-w &gt;=
+   * world-w : bitwise and-masked with &amp;0222 =&gt; 222 &gt;= 220 &gt;= 200 &gt;= 000
+   * owner-x &gt;= group-x &gt;= world-x : bitwise and-masked with &amp;0111 =&gt; 111 &gt;=
+   * 110 &gt;= 100 &gt;= 000
    *
    * @return true if first FsAction is more permissive than the second, false
    *         if not.
@@ -579,9 +579,9 @@ public class HCatUtil {
 
   /**
    * Get or create a hive client depending on whether it exits in cache or not.
-   * @Deprecated : use {@link #getHiveMetastoreClient(HiveConf)} instead.
+   * @deprecated : use {@link #getHiveMetastoreClient(HiveConf)} instead.
    * This was deprecated in Hive 1.2, slated for removal in two versions
-   * (i.e. 1.2 & 1.3(projected) will have it, but it will be removed after that)
+   * (i.e. 1.2 &amp; 1.3(projected) will have it, but it will be removed after that)
    * @param hiveConf The hive configuration
    * @return the client
    * @throws MetaException When HiveMetaStoreClient couldn't be created
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/HCatWriter.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/HCatWriter.java
index 46d1b85..65b9ac2 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/HCatWriter.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/HCatWriter.java
@@ -58,7 +58,7 @@ public abstract class HCatWriter {
    *
    * @param recordItr
    *          {@link Iterator} records to be written into HCatalog.
-   * @throws {@link HCatException}
+   * @throws  HCatException
    */
   public abstract void write(final Iterator<HCatRecord> recordItr)
     throws HCatException;
@@ -67,7 +67,7 @@ public abstract class HCatWriter {
    * This method should be called at master node. Primary purpose of this is to
    * do metadata commit.
    *
-   * @throws {@link HCatException}
+   * @throws HCatException
    */
   public abstract void commit(final WriterContext context) throws HCatException;
 
@@ -75,7 +75,7 @@ public abstract class HCatWriter {
    * This method should be called at master node. Primary purpose of this is to
    * do cleanups in case of failures.
    *
-   * @throws {@link HCatException} *
+   * @throws HCatException
    */
   public abstract void abort(final WriterContext context) throws HCatException;
 
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
index 1406e5a..efafe0c 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
@@ -150,7 +150,7 @@ public class NotificationListener extends MetaStoreEventListener {
   * particular table by listening on a topic named "dbName.tableName" with message selector
   * string {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_EVENT} =
   * {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_DROP_PARTITION_EVENT}.
-   * </br>
+   * <br>
   * TODO: DataNucleus 2.0.3, currently used by the HiveMetaStore for persistence, has been
   * found to throw NPE when serializing objects that contain null. For this reason we override
   * some fields in the StorageDescriptor of this notification. This should be fixed after
@@ -264,7 +264,7 @@ public class NotificationListener extends MetaStoreEventListener {
   * dropped tables by listening on topic "HCAT" with message selector string
   * {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_EVENT} =
   * {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_DROP_TABLE_EVENT}
-   * </br>
+   * <br>
   * TODO: DataNucleus 2.0.3, currently used by the HiveMetaStore for persistence, has been
   * found to throw NPE when serializing objects that contain null. For this reason we override
   * some fields in the StorageDescriptor of this notification. This should be fixed after
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
index 5e12254..bc99b6c 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
@@ -54,7 +54,7 @@ import java.util.List;
 import java.util.Properties;
 
 /**
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.AbstractRecordWriter}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.AbstractRecordWriter
  */
 @Deprecated
 public abstract class AbstractRecordWriter implements RecordWriter {
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
index 32dae45..85c3429 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
@@ -44,9 +44,9 @@ import java.util.Properties;
 
 /**
  * Streaming Writer handles delimited input (eg. CSV).
- * Delimited input is parsed & reordered to match column order in table
+ * Delimited input is parsed &amp; reordered to match column order in table
  * Uses Lazy Simple Serde to process delimited input
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.StrictDelimitedInputWriter}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.StrictDelimitedInputWriter
  */
 @Deprecated
 public class DelimitedInputWriter extends AbstractRecordWriter {
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index 3604630..66a1737 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -67,7 +67,7 @@ import java.util.Map;
  * Information about the hive end point (i.e. table or partition) to write to.
  * A light weight object that does NOT internally hold on to resources such as
  * network connections. It can be stored in Hashed containers such as sets and hash tables.
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.HiveStreamingConnection}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.HiveStreamingConnection
  */
 @Deprecated
 public class HiveEndPoint {
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java
index 19078d2..0f3c0bc 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java
@@ -20,7 +20,7 @@ package org.apache.hive.hcatalog.streaming;
 
 
 /**
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.RecordWriter}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.RecordWriter
  */
 @Deprecated
 public interface RecordWriter {
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java
index 0de8abc..3af9aed 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.security.UserGroupInformation;
  * Represents a connection to a HiveEndPoint. Used to acquire transaction batches.
  * Note: the expectation is that there is at most 1 TransactionBatch outstanding for any given
  * StreamingConnection.  Violating this may result in "out of sequence response".
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.HiveStreamingConnection}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.HiveStreamingConnection
  */
 @Deprecated
 public interface StreamingConnection {
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java
index 48e7e49..d588f71 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java
@@ -37,7 +37,7 @@ import java.util.Properties;
 /**
  * Streaming Writer handles utf8 encoded Json (Strict syntax).
  * Uses org.apache.hive.hcatalog.data.JsonSerDe to process Json input
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.StrictJsonWriter}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.StrictJsonWriter
  */
 @Deprecated
 public class StrictJsonWriter extends AbstractRecordWriter {
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java
index f0540e0..6a9a47e 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.io.Text;
 /**
  * Streaming Writer handles text input data with regex. Uses
  * org.apache.hadoop.hive.serde2.RegexSerDe
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.StrictRegexWriter}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.StrictRegexWriter
  */
 @Deprecated
 public class StrictRegexWriter extends AbstractRecordWriter {
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
index 400fd49..96aae02 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
@@ -30,7 +30,7 @@ import java.util.Collection;
  * Note on thread safety: At most 2 threads can run through a given TransactionBatch at the same
  * time.  One thread may call {@link #heartbeat()} and the other all other methods.
  * Violating this may result in "out of sequence response".
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.HiveStreamingConnection}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.HiveStreamingConnection
  */
 @Deprecated
 public interface TransactionBatch  {
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java
index 67785d0..a90d5d3 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java
@@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory;
  * previously closed. The {@link MutatorCoordinator} will seamlessly handle transitions between groups, creating and
  * closing {@link Mutator Mutators} as needed to write to the appropriate partition and bucket. New partitions will be
  * created in the meta store if {@link AcidTable#createPartitions()} is set.
- * <p/>
+ * <p>
  * {@link #insert(List, Object) Insert} events must be artificially assigned appropriate bucket ids in the preceding
  * grouping phase so that they are grouped correctly. Note that any write id or row id assigned to the
  * {@link RecordIdentifier RecordIdentifier} of such events will be ignored by both the coordinator and the underlying
diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java
index 8455a3f..943a5a8 100644
--- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java
+++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java
@@ -188,7 +188,7 @@ public abstract class HCatClient {
    * Serializer for HCatTable.
    * @param hcatTable The HCatTable to be serialized into string form
    * @return String representation of the HCatTable.
-   * @throws HCatException, on failure to serialize.
+   * @throws HCatException on failure to serialize.
    */
   public abstract String serializeTable(HCatTable hcatTable) throws HCatException;
 
@@ -204,7 +204,7 @@ public abstract class HCatClient {
    * Serializer for HCatPartition.
    * @param hcatPartition The HCatPartition instance to be serialized.
    * @return String representation of the HCatPartition.
-   * @throws HCatException, on failure to serialize.
+   * @throws HCatException on failure to serialize.
    */
   public abstract String serializePartition(HCatPartition hcatPartition) throws HCatException;
 
@@ -212,7 +212,7 @@ public abstract class HCatClient {
    * Serializer for a list of HCatPartition.
    * @param hcatPartitions The HCatPartitions to be serialized.
    * @return A list of Strings, each representing an HCatPartition.
-   * @throws HCatException, on failure to serialize.
+   * @throws HCatException on failure to serialize.
    */
   public abstract List<String> serializePartitions(List<HCatPartition> hcatPartitions) throws HCatException;
 
@@ -220,7 +220,7 @@ public abstract class HCatClient {
    * Deserializer for an HCatPartition.
   * @param hcatPartitionStringRep The String representation of the HCatPartition, presumably retrieved from {@link #serializePartition(HCatPartition)}
    * @return HCatPartition instance reconstructed from the string.
-   * @throws HCatException, on failure to deserialze.
+   * @throws HCatException on failure to deserialze.
    */
   public abstract HCatPartition deserializePartition(String hcatPartitionStringRep) throws HCatException;
 
@@ -228,7 +228,7 @@ public abstract class HCatClient {
    * Deserializer for a list of HCatPartition strings.
   * @param hcatPartitionStringReps The list of HCatPartition strings to be deserialized.
   * @return A list of HCatPartition instances, each reconstructed from an entry in the string-list.
-   * @throws HCatException, on failure to deserialize.
+   * @throws HCatException on failure to deserialize.
    */
   public abstract List<HCatPartition> deserializePartitions(List<String> hcatPartitionStringReps) throws HCatException;
 
@@ -389,7 +389,8 @@ public abstract class HCatClient {
    * @param tableName The table name.
   * @param partitionSpec The partition specification, {[col_name,value],[col_name2,value2]}.
   * @param ifExists Hive returns an error if the partition specified does not exist, unless ifExists is set to true.
-   * @throws HCatException,ConnectionFailureException
+   * @throws HCatException
+   * @throws ConnectionFailureException
    */
   public abstract void dropPartitions(String dbName, String tableName,
                     Map<String, String> partitionSpec, boolean ifExists)
@@ -408,7 +409,8 @@ public abstract class HCatClient {
   * @param partitionSpec The partition specification, {[col_name,value],[col_name2,value2]}.
   * @param ifExists Hive returns an error if the partition specified does not exist, unless ifExists is set to true.
    * @param deleteData Whether to delete the underlying data.
-   * @throws HCatException,ConnectionFailureException
+   * @throws HCatException
+   * @throws ConnectionFailureException
    */
    public abstract void dropPartitions(String dbName, String tableName,
                     Map<String, String> partitionSpec, boolean ifExists, boolean deleteData)
@@ -419,7 +421,7 @@ public abstract class HCatClient {
    * @param dbName The database name.
    * @param tblName The table name.
    * @param filter The filter string,
-   *    for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering 
can
+   *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". 
Filtering can
    *    be done only on string partition keys.
    * @return list of partitions
    * @throws HCatException
@@ -468,7 +470,8 @@ public abstract class HCatClient {
    * @param owner the owner
    * @param renewerKerberosPrincipalName the renewer kerberos principal name
    * @return the delegation token
-   * @throws HCatException,ConnectionFailureException
+   * @throws HCatException
+   * @throws ConnectionFailureException
    */
   public abstract String getDelegationToken(String owner,
                        String renewerKerberosPrincipalName) throws HCatException;
@@ -498,7 +501,7 @@ public abstract class HCatClient {
    * @param dbName The name of the DB.
    * @param tableName The name of the table.
    * @return Topic-name for the message-bus on which messages will be sent for the specified table.
-   * By default, this is set to <db-name>.<table-name>. Returns null when not set.
+   * By default, this is set to &lt;db-name&gt;.&lt;table-name&gt;. Returns null when not set.
    */
   public abstract String getMessageBusTopicName(String dbName, String tableName) throws HCatException;
 
@@ -509,7 +512,7 @@ public abstract class HCatClient {
   * @param lastEventId : The last event id that was processed for this reader. The returned
   *                    replication tasks will start from this point forward
   * @param maxEvents : Maximum number of events to consider for generating the
-   *                  replication tasks. If < 1, then all available events will be considered.
+   *                  replication tasks. If &lt; 1, then all available events will be considered.
   * @param dbName : The database name for which we're interested in the events for.
   * @param tableName : The table name for which we're interested in the events for - if null,
   *                  then this function will behave as if it were running at a db level.
@@ -525,7 +528,7 @@ public abstract class HCatClient {
   * @param lastEventId The last event id that was consumed by this reader.  The returned
   *                    notifications will start at the next eventId available this eventId that
   *                    matches the filter.
-   * @param maxEvents Maximum number of events to return.  If < 1, then all available events will
+   * @param maxEvents Maximum number of events to return.  If &lt; 1, then all available events will
   *                  be returned.
   * @param filter Filter to determine if message should be accepted.  If null, then all
    *               available events up to maxEvents will be returned.
diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationTask.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationTask.java
index 7aa8744..7c9c5a5 100644
--- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationTask.java
+++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationTask.java
@@ -180,7 +180,7 @@ public abstract class ReplicationTask {
   * throws an IllegalArgumentException as well, a ReplicationTask will use the same key sent in.
   * That way, the default will then be that the destination db name is the same as the src db name
   *
-   * If you want to use a Map<String,String> mapping instead of a Function<String,String>,
+   * If you want to use a Map&lt;String,String&gt; mapping instead of a Function&lt;String,String&gt;,
   * simply call this function as .withTableNameMapping(ReplicationUtils.mapBasedFunction(tableMap))
    * @param tableNameMapping
    * @return this replication task
@@ -197,7 +197,7 @@ public abstract class ReplicationTask {
   * throws an IllegalArgumentException as well, a ReplicationTask will use the same key sent in.
   * That way, the default will then be that the destination db name is the same as the src db name
   *
-   * If you want to use a Map<String,String> mapping instead of a Function<String,String>,
+   * If you want to use a Map&lt;String,String&gt; mapping instead of a Function&lt;String,String&gt;,
   * simply call this function as .withDbNameMapping(ReplicationUtils.mapBasedFunction(dbMap))
    * @param dbNameMapping
    * @return this replication task
@@ -214,9 +214,9 @@ public abstract class ReplicationTask {
   }
 
   /**
-   * Returns a Iterable<Command> to send to a hive driver on the source warehouse
+   * Returns a Iterable&lt;Command&gt; to send to a hive driver on the source warehouse
    *
-   * If you *need* a List<Command> instead, you can use guava's
+   * If you *need* a List&lt;Command&gt; instead, you can use guava's
    * ImmutableList.copyOf(iterable) or Lists.newArrayList(iterable) to
    * get the underlying list, but this defeats the purpose of making this
    * interface an Iterable rather than a List, since it is very likely
@@ -226,9 +226,9 @@ public abstract class ReplicationTask {
   abstract public Iterable<? extends Command> getSrcWhCommands();
 
   /**
-   * Returns a Iterable<Command> to send to a hive driver on the source warehouse
+   * Returns a Iterable&lt;Command&gt; to send to a hive driver on the source warehouse
    *
-   * If you *need* a List<Command> instead, you can use guava's
+   * If you *need* a List&lt;Command&gt; instead, you can use guava's
    * ImmutableList.copyOf(iterable) or Lists.newArrayList(iterable) to
    * get the underlying list, but this defeats the purpose of making this
    * interface an Iterable rather than a List, since it is very likely
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Meta.java b/hplsql/src/main/java/org/apache/hive/hplsql/Meta.java
index 52a702e..e56e8e6 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Meta.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Meta.java
@@ -259,7 +259,7 @@ public class Meta {
   }
   
   /**
-   * Split qualified object to 2 parts: schema.tab.col -> schema.tab|col; tab.col -> tab|col 
+   * Split qualified object to 2 parts: schema.tab.col -&gt; schema.tab|col; tab.col -&gt; tab|col
    */
   public ArrayList<String> splitIdentifierToTwoParts(String name) {
     ArrayList<String> parts = splitIdentifier(name);    
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Utils.java b/hplsql/src/main/java/org/apache/hive/hplsql/Utils.java
index 6bc0568..2a86f55 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Utils.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Utils.java
@@ -52,7 +52,7 @@ public class Utils {
   }
 
   /**
-   * Quote string and escape characters - ab'c -> 'ab''c'
+   * Quote string and escape characters - ab'c -&gt; 'ab''c'
    */
   public static String quoteString(String s) {
     if(s == null) {
@@ -73,7 +73,7 @@ public class Utils {
   }
   
   /**
-   * Merge quoted strings: 'a' 'b' -> 'ab'; 'a''b' 'c' -> 'a''bc'
+   * Merge quoted strings: 'a' 'b' -&gt; 'ab'; 'a''b' 'c' -&gt; 'a''bc'
    */
   public static String mergeQuotedStrings(String s1, String s2) {
          if(s1 == null || s2 == null) {
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Var.java b/hplsql/src/main/java/org/apache/hive/hplsql/Var.java
index d1151e7..0a5aa9e 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Var.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Var.java
@@ -624,7 +624,7 @@ public class Var {
        }
 
   /**
-   * Convert value to SQL string - string literals are quoted and escaped, ab'c -> 'ab''c'
+   * Convert value to SQL string - string literals are quoted and escaped, ab'c -&gt; 'ab''c'
    */
   public String toSqlString() {
     if (value == null) {
diff --git a/jdbc/src/java/org/apache/hive/jdbc/Utils.java b/jdbc/src/java/org/apache/hive/jdbc/Utils.java
index 852942e..e0200b7 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/Utils.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/Utils.java
@@ -307,8 +307,8 @@ public class Utils {
   /**
    * Parse JDBC connection URL
    * The new format of the URL is:
-   * jdbc:hive2://<host1>:<port1>,<host2>:<port2>/dbName;sess_var_list?hive_conf_list#hive_var_list
-   * where the optional sess, conf and var lists are semicolon separated <key>=<val> pairs.
+   * jdbc:hive2://&lt;host1&gt;:&lt;port1&gt;,&lt;host2&gt;:&lt;port2&gt;/dbName;sess_var_list?hive_conf_list#hive_var_list
+   * where the optional sess, conf and var lists are semicolon separated &lt;key&gt;=&lt;val&gt; pairs.
   * For utilizing dynamic service discovery with HiveServer2 multiple comma separated host:port pairs can
   * be specified as shown above.
   * The JDBC driver resolves the list of uris and picks a specific server instance to connect to.
diff --git a/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapDump.java b/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapDump.java
index 479e053..b2e4d78 100644
--- a/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapDump.java
+++ b/llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapDump.java
@@ -36,7 +36,8 @@ import org.apache.hadoop.mapred.InputSplit;
 
 /**
  * Utility to test query and data retrieval via the LLAP input format.
- * llapdump --hiveconf hive.zookeeper.quorum=localhost --hiveconf hive.zookeeper.client.port=2181 --hiveconf hive.llap.daemon.service.hosts=@llap_MiniLlapCluster 'select * from employee where employee_id < 10'
+ * llapdump --hiveconf hive.zookeeper.quorum=localhost --hiveconf hive.zookeeper.client.port=2181\
+ *   --hiveconf hive.llap.daemon.service.hosts=@llap_MiniLlapCluster 'select * from employee where employee_id &lt; 10'
  *
  */
 public class LlapDump {
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/ConsumerFeedback.java b/llap-server/src/java/org/apache/hadoop/hive/llap/ConsumerFeedback.java
index b71a358..82c581e 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/ConsumerFeedback.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/ConsumerFeedback.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hive.llap;
 
 /**
- * Consumer feedback typically used by Consumer<T>;
+ * Consumer feedback typically used by Consumer&lt;T&gt;;
  * allows consumer to influence production of data.
  */
 public interface ConsumerFeedback<T> {
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
index 9591e48..4dd3826 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
@@ -47,7 +47,7 @@ public interface LowLevelCache {
   *    can be thrown away, the reader will never touch it; but we need code in the reader to
   *    handle such cases to avoid disk reads for these "tails" vs real unmatched ranges.
   *    Some sort of InvalidCacheChunk could be placed to avoid them. TODO
-   * @param base base offset for the ranges (stripe/stream offset in case of ORC).
+   * @param baseOffset base offset for the ranges (stripe/stream offset in case of ORC).
    */
  DiskRangeList getFileData(Object fileKey, DiskRangeList range, long baseOffset,
      DiskRangeListFactory factory, LowLevelCacheCounters qfCounters, BooleanRef gotAllData);
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/PriorityBlockingDeque.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/PriorityBlockingDeque.java
index 1ac8ec6..925bd9f 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/PriorityBlockingDeque.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/PriorityBlockingDeque.java
@@ -26,15 +26,15 @@ import java.util.concurrent.locks.ReentrantLock;
 /**
  * An optionally-bounded {@linkplain BlockingDeque blocking deque} based on
  * a navigable set.
- * <p/>
+ * <br>
  * <p> The optional capacity bound constructor argument serves as a
  * way to prevent excessive expansion. The capacity, if unspecified,
  * is equal to {@link Integer#MAX_VALUE}.
- * <p/>
+ * <br>
  * <p>This class and its iterator implement all of the
  * <em>optional</em> methods of the {@link Collection} and {@link
  * Iterator} interfaces.
- * <p/>
+ * <br>
  * This code is loosely based on the {@linkplain java.util.concurrent.LinkedBlockingDeque linked blocking deque} code.
  */
 public class PriorityBlockingDeque<E>
@@ -439,8 +439,8 @@ public class PriorityBlockingDeque<E>
    * Inserts the specified element to the deque unless it would
    * violate capacity restrictions.  When using a capacity-restricted deque,
    * it is generally preferable to use method {@link #offer(Object) offer}.
-   * <p/>
-   * <p>This method is equivalent to {@link #addLast}.
+   * <br>
+   * This method is equivalent to {@link #addLast}.
    *
    * @throws IllegalStateException if the element cannot be added at this
    *                               time due to capacity restrictions
@@ -481,8 +481,8 @@ public class PriorityBlockingDeque<E>
    * Retrieves and removes the head of the queue represented by this deque.
    * This method differs from {@link #poll poll} only in that it throws an
    * exception if this deque is empty.
-   * <p/>
-   * <p>This method is equivalent to {@link #removeFirst() removeFirst}.
+   * <br>
+   * This method is equivalent to {@link #removeFirst() removeFirst}.
    *
    * @return the head of the queue represented by this deque
    * @throws NoSuchElementException if this deque is empty
@@ -508,8 +508,8 @@ public class PriorityBlockingDeque<E>
    * Retrieves, but does not remove, the head of the queue represented by
    * this deque.  This method differs from {@link #peek peek} only in that
    * it throws an exception if this deque is empty.
-   * <p/>
-   * <p>This method is equivalent to {@link #getFirst() getFirst}.
+   * <br>
+   * This method is equivalent to {@link #getFirst() getFirst}.
    *
    * @return the head of the queue represented by this deque
    * @throws NoSuchElementException if this deque is empty
@@ -528,8 +528,8 @@ public class PriorityBlockingDeque<E>
    * (in the absence of memory or resource constraints) accept without
    * blocking. This is always equal to the initial capacity of this deque
    * less the current <tt>size</tt> of this deque.
-   * <p/>
-   * <p>Note that you <em>cannot</em> always tell if an attempt to insert
+   * <br>
+   * Note that you <em>cannot</em> always tell if an attempt to insert
    * an element will succeed by inspecting <tt>remainingCapacity</tt>
    * because it may be the case that another thread is about to
    * insert or remove an element.
@@ -622,8 +622,8 @@ public class PriorityBlockingDeque<E>
    * <tt>o.equals(e)</tt> (if such an element exists).
    * Returns <tt>true</tt> if this deque contained the specified element
    * (or equivalently, if this deque changed as a result of the call).
-   * <p/>
-   * <p>This method is equivalent to
+   * <br>
+   * This method is equivalent to
    * {@link #removeFirstOccurrence(Object) removeFirstOccurrence}.
    *
    * @param o element to be removed from this deque, if present
@@ -671,12 +671,12 @@ public class PriorityBlockingDeque<E>
   /**
    * Returns an array containing all of the elements in this deque, in
    * proper sequence (from first to last element).
-   * <p/>
+   * <br>
    * <p>The returned array will be "safe" in that no references to it are
    * maintained by this deque.  (In other words, this method must allocate
    * a new array).  The caller is thus free to modify the returned array.
-   * <p/>
-   * <p>This method acts as bridge between array-based and collection-based
+   * </p>
+   * <br>This method acts as bridge between array-based and collection-based
    * APIs.
    *
    * @return an array containing all of the elements in this deque
@@ -697,24 +697,24 @@ public class PriorityBlockingDeque<E>
    * the specified array.  If the deque fits in the specified array, it
    * is returned therein.  Otherwise, a new array is allocated with the
    * runtime type of the specified array and the size of this deque.
-   * <p/>
+   * <br>
    * <p>If this deque fits in the specified array with room to spare
    * (i.e., the array has more elements than this deque), the element in
    * the array immediately following the end of the deque is set to
    * <tt>null</tt>.
-   * <p/>
+   * </p>
    * <p>Like the {@link #toArray()} method, this method acts as bridge between
    * array-based and collection-based APIs.  Further, this method allows
    * precise control over the runtime type of the output array, and may,
    * under certain circumstances, be used to save allocation costs.
-   * <p/>
+   * </p>
    * <p>Suppose <tt>x</tt> is a deque known to contain only strings.
    * The following code can be used to dump the deque into a newly
    * allocated array of <tt>String</tt>:
-   * <p/>
+   * </p>
    * <pre>
    *     String[] y = x.toArray(new String[0]);</pre>
-   * <p/>
+   * <br>
    * Note that <tt>toArray(new Object[0])</tt> is identical in function to
    * <tt>toArray()</tt>.
    *
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
index 047a55c..aaf9674 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
@@ -76,15 +76,15 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * are available or when a higher priority task arrives and will schedule it for execution.
  * When pre-emption is enabled, the tasks from wait queue can replace(pre-empt) a running task.
  * The pre-empted task is reported back to the Application Master(AM) for it to be rescheduled.
- * <p/>
+ * <br>
  * Because of the concurrent nature of task submission, the position of the task in wait queue is
  * held as long the scheduling of the task from wait queue (with or without pre-emption) is complete.
  * The order of pre-emption is based on the ordering in the pre-emption queue. All tasks that cannot
  * run to completion immediately (canFinish = false) are added to pre-emption queue.
- * <p/>
+ * <br>
  * When all the executor threads are occupied and wait queue is full, the task scheduler will
  * return SubmissionState.REJECTED response
- * <p/>
+ * <br>
  * Task executor service can be shut down which will terminated all running tasks and reject all
  * new tasks. Shutting down of the task executor service can be done gracefully or immediately.
  */
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
index 7f436e2..e86a96c 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
@@ -344,7 +344,7 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
   /**
    * Attempt to kill a running task. If the task has not started running, it will not start.
    * If it's already running, a kill request will be sent to it.
-   * <p/>
+   * <br>
    * The AM will be informed about the task kill.
    */
   public void killTask() {
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
index e14f314..54b6135 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
@@ -24,7 +24,7 @@ import com.google.common.base.Objects;
 
 /**
  * Llap daemon JVM info. These are some additional metrics that are not exposed via
- * {@link org.apache.hadoop.metrics.jvm.JvmMetrics}
+ * {@link org.apache.hadoop.hive.common.JvmMetrics}
  *
  * NOTE: These metrics are for sinks supported by hadoop-metrics2. There is already a /jmx endpoint
  * that gives all these info.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
index 73acc31..95028cc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
@@ -116,10 +116,6 @@ public class DDLWork implements Serializable {
     this.alterTblDesc = alterTblDesc;
   }
 
-  /**
-   * @param dropTblDesc
-   *          drop table descriptor
-   */
   public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
       DropPartitionDesc dropPartitionDesc) {
     this(inputs, outputs);
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataInputStream.java b/serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataInputStream.java
index b26d342..3cb21b7 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataInputStream.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataInputStream.java
@@ -108,8 +108,8 @@ public class TeradataBinaryDataInputStream extends 
SwappedDataInputStream {
    * Read DATE.
    * The representation of date in Teradata binary format is:
    * The Date D is a int with 4 bytes using little endian,
-   * The representation is (D+19000000).ToString -> YYYYMMDD,
-   * eg: Date 07 b2 01 00 -> 111111 in little endian -> 19111111 - > 
1911.11.11.
+   * The representation is (D+19000000).ToString -&gt; YYYYMMDD,
+   * eg: Date 07 b2 01 00 -&gt; 111111 in little endian -&gt; 19111111 - &gt; 
1911.11.11.
    * the null date will use 0 to pad.
    *
    * @return the date
@@ -135,7 +135,7 @@ public class TeradataBinaryDataInputStream extends 
SwappedDataInputStream {
   /**
    * Read CHAR(N).
    * The representation of char in Teradata binary format is
-   * the byte number to read is based on the [charLength] * [bytePerChar] <- 
totalLength,
+   * the byte number to read is based on the [charLength] * [bytePerChar] 
&lt;- totalLength,
    * bytePerChar is decided by the charset: LATAIN charset is 2 bytes per char 
and UNICODE charset is 3 bytes per char.
    * the null char will use space to pad.
    *
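
The DATE layout described in the hunk above can be checked with a short, self-contained sketch; it is not part of the patch, and the class name and byte values are only the javadoc's own example. It uses commons-io's SwappedDataInputStream, the same little-endian stream this SerDe class extends:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import org.apache.commons.io.input.SwappedDataInputStream;

    public class TeradataDateDecodeSketch {
      public static void main(String[] args) throws IOException {
        // The javadoc's example bytes: 07 b2 01 00 reads as 111111 in little endian.
        byte[] raw = {0x07, (byte) 0xb2, 0x01, 0x00};
        try (SwappedDataInputStream in =
                 new SwappedDataInputStream(new ByteArrayInputStream(raw))) {
          int d = in.readInt();                 // 111111
          int yyyymmdd = d + 19000000;          // 19111111
          System.out.printf("%04d.%02d.%02d%n",
              yyyymmdd / 10000, (yyyymmdd / 100) % 100, yyyymmdd % 100);
          // prints 1911.11.11
        }
      }
    }
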
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataOutputStream.java b/serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataOutputStream.java
index f2f801d..3799aa2 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataOutputStream.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataOutputStream.java
@@ -138,8 +138,8 @@ public class TeradataBinaryDataOutputStream extends 
ByteArrayOutputStream {
    * Write DATE.
    * The representation of date in Teradata binary format is:
    * The Date D is a int with 4 bytes using little endian.
-   * The representation is (YYYYMMDD - 19000000).toInt -> D
-   * eg. 1911.11.11 -> 19111111 -> 111111 -> 07 b2 01 00 in little endian.
+   * The representation is (YYYYMMDD - 19000000).toInt -&gt; D
+   * eg. 1911.11.11 -&gt; 19111111 -&gt; 111111 -&gt; 07 b2 01 00 in little 
endian.
    * the null date will use 0 to pad.
    *
    * @param date the date
@@ -168,7 +168,7 @@ public class TeradataBinaryDataOutputStream extends 
ByteArrayOutputStream {
   /**
    * Write CHAR(N).
    * The representation of char in Teradata binary format is:
-   * the byte number to read is based on the [charLength] * [bytePerChar] <- 
totalLength,
+   * the byte number to read is based on the [charLength] * [bytePerChar] 
&lt;- totalLength,
    * bytePerChar is decided by the charset: LATAIN charset is 2 bytes per char 
and UNICODE charset is 3 bytes per char.
    * the null char will use space to pad.
    *
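
Going the other way, the write-side arithmetic from the hunk above can be sketched with commons-io's EndianUtils; this is an illustration of the documented encoding, not the class's actual writeDate implementation:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.time.LocalDate;
    import org.apache.commons.io.EndianUtils;

    public class TeradataDateEncodeSketch {
      // (YYYYMMDD - 19000000) written as a 4-byte little-endian int:
      // 1911-11-11 -> 19111111 -> 111111 -> 07 b2 01 00
      public static byte[] encode(LocalDate date) throws IOException {
        int yyyymmdd = date.getYear() * 10000
            + date.getMonthValue() * 100 + date.getDayOfMonth();
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        EndianUtils.writeSwappedInteger(out, yyyymmdd - 19000000);
        return out.toByteArray();
      }
    }
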
diff --git a/service/src/java/org/apache/hive/service/Service.java 
b/service/src/java/org/apache/hive/service/Service.java
index 51ff7c1..f989641 100644
--- a/service/src/java/org/apache/hive/service/Service.java
+++ b/service/src/java/org/apache/hive/service/Service.java
@@ -49,7 +49,7 @@ public interface Service {
    * The transition must be from {@link STATE#NOTINITED} to {@link 
STATE#INITED} unless the
    * operation failed and an exception was raised.
    *
-   * @param config
+   * @param conf
    *          the configuration of the service
    */
   void init(HiveConf conf);
diff --git a/service/src/java/org/apache/hive/service/ServiceOperations.java 
b/service/src/java/org/apache/hive/service/ServiceOperations.java
index 093bcab..1b68ea4 100644
--- a/service/src/java/org/apache/hive/service/ServiceOperations.java
+++ b/service/src/java/org/apache/hive/service/ServiceOperations.java
@@ -51,7 +51,7 @@ public final class ServiceOperations {
 
   /**
    * Initialize a service.
-   * <p/>
+   * <br>
    * The service state is checked <i>before</i> the operation begins.
    * This process is <i>not</i> thread safe.
    * @param service a service that must be in the state
@@ -69,7 +69,7 @@ public final class ServiceOperations {
 
   /**
    * Start a service.
-   * <p/>
+   * <br>
    * The service state is checked <i>before</i> the operation begins.
    * This process is <i>not</i> thread safe.
    * @param service a service that must be in the state
@@ -86,7 +86,7 @@ public final class ServiceOperations {
 
   /**
    * Initialize then start a service.
-   * <p/>
+   * <br>
    * The service state is checked <i>before</i> the operation begins.
    * This process is <i>not</i> thread safe.
    * @param service a service that must be in the state
@@ -102,9 +102,9 @@ public final class ServiceOperations {
 
   /**
    * Stop a service.
-   * <p/>Do nothing if the service is null or not
+   * <br>Do nothing if the service is null or not
    * in a state in which it can be/needs to be stopped.
-   * <p/>
+   * <br>
    * The service state is checked <i>before</i> the operation begins.
    * This process is <i>not</i> thread safe.
    * @param service a service or null
diff --git a/service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java 
b/service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java
index d18ac87..7dc11b2 100644
--- a/service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java
+++ b/service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java
@@ -78,7 +78,7 @@ public final class HttpAuthUtils {
    * @param clientUserName Client User name.
    * @return An unsigned cookie token generated from input parameters.
    * The final cookie generated is of the following format :
-   * cu=<username>&rn=<randomNumber>&s=<cookieSignature>
+   * 
cu=&lt;username&gt;&amp;rn=&lt;randomNumber&gt;&amp;s=&lt;cookieSignature&gt;
    */
   public static String createCookieToken(String clientUserName) {
     StringBuilder sb = new StringBuilder();
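
For readers who just want to see the token shape from the corrected javadoc, a minimal sketch follows. The random-number source is a placeholder and the signature step is omitted; the real HiveServer2 code signs the token separately:

    import java.security.SecureRandom;

    public class CookieTokenSketch {
      // Unsigned token in the documented shape: cu=<username>&rn=<randomNumber>
      // (the &s=<cookieSignature> part is appended later, when the cookie is signed)
      public static String createCookieToken(String clientUserName) {
        return "cu=" + clientUserName + "&rn=" + new SecureRandom().nextLong();
      }
    }
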
diff --git a/service/src/java/org/apache/hive/service/auth/PasswdAuthenticationProvider.java b/service/src/java/org/apache/hive/service/auth/PasswdAuthenticationProvider.java
index 60e35b4..fdc6857 100644
--- a/service/src/java/org/apache/hive/service/auth/PasswdAuthenticationProvider.java
+++ b/service/src/java/org/apache/hive/service/auth/PasswdAuthenticationProvider.java
@@ -26,7 +26,7 @@ public interface PasswdAuthenticationProvider {
    * to authenticate users for their requests.
    * If a user is to be granted, return nothing/throw nothing.
    * When a user is to be disallowed, throw an appropriate {@link 
AuthenticationException}.
-   * <p/>
+   * <br>
    * For an example implementation, see {@link LdapAuthenticationProviderImpl}.
    *
    * @param user     The username received over the connection request
diff --git a/service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java b/service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java
index eb59642..8e4659b 100644
--- a/service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java
+++ b/service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java
@@ -31,12 +31,12 @@ import org.slf4j.LoggerFactory;
 
 /**
  * This class is responsible for setting the ipAddress for operations executed 
via HiveServer2.
- * <p>
+ * <br>
  * <ul>
  * <li>IP address is only set for operations that calls listeners with 
hookContext</li>
  * <li>IP address is only set if the underlying transport mechanism is 
socket</li>
  * </ul>
- * </p>
+ * <br>
  *
  * @see org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext
  */
diff --git a/service/src/java/org/apache/hive/service/auth/ldap/CustomQueryFilterFactory.java b/service/src/java/org/apache/hive/service/auth/ldap/CustomQueryFilterFactory.java
index 200cb01..30ce1a6 100644
--- a/service/src/java/org/apache/hive/service/auth/ldap/CustomQueryFilterFactory.java
+++ b/service/src/java/org/apache/hive/service/auth/ldap/CustomQueryFilterFactory.java
@@ -30,7 +30,7 @@ import org.slf4j.LoggerFactory;
  * <br>
  * The produced filter object filters out all users that are not found in the 
search result
  * of the query provided in Hive configuration.
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY
+ * @see 
org.apache.hadoop.hive.conf.HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY
  */
 public class CustomQueryFilterFactory implements FilterFactory {
 
diff --git a/service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java b/service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java
index 9165227..5470ad7 100644
--- a/service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java
+++ b/service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java
@@ -35,7 +35,7 @@ import org.slf4j.LoggerFactory;
  * <br>
  * The produced filter object filters out all users that are not members of at 
least one of
  * the groups provided in Hive configuration.
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER
  */
 public final class GroupFilterFactory implements FilterFactory {
 
diff --git a/service/src/java/org/apache/hive/service/auth/ldap/LdapUtils.java 
b/service/src/java/org/apache/hive/service/auth/ldap/LdapUtils.java
index 5336c10..d3caaef 100644
--- a/service/src/java/org/apache/hive/service/auth/ldap/LdapUtils.java
+++ b/service/src/java/org/apache/hive/service/auth/ldap/LdapUtils.java
@@ -145,10 +145,10 @@ public final class LdapUtils {
    * @param conf Hive configuration
    * @param var variable to be read
    * @return a list of DN patterns
-   * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_BASEDN
-   * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GUIDKEY
-   * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN
-   * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN
+   * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_BASEDN
+   * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_GUIDKEY
+   * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN
+   * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN
    */
   public static List<String> parseDnPatterns(HiveConf conf, HiveConf.ConfVars 
var) {
     String patternsString = conf.getVar(var);
@@ -183,8 +183,8 @@ public final class LdapUtils {
    * Converts a collection of Distinguished Name patterns to a collection of 
base DNs.
    * @param patterns Distinguished Name patterns
    * @return a list of base DNs
-   * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN
-   * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN
+   * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN
+   * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN
    */
   public static List<String> patternsToBaseDns(Collection<String> patterns) {
     List<String> result = new ArrayList<>();
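
To make the corrected @see targets above more concrete: a DN pattern carries a %s placeholder for the user, and, on one reading of the utility (stated here as an assumption rather than a quote of the code), its base DN is simply everything after the first relative component:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.List;

    public class DnPatternSketch {
      // "cn=%s,ou=Users,dc=example,dc=com" -> "ou=Users,dc=example,dc=com"
      public static List<String> patternsToBaseDnsSketch(Collection<String> patterns) {
        List<String> result = new ArrayList<>();
        for (String pattern : patterns) {
          result.add(pattern.replaceAll("^.*?,", ""));   // drop the leading "cn=%s," part
        }
        return result;
      }

      public static void main(String[] args) {
        System.out.println(patternsToBaseDnsSketch(
            Arrays.asList("cn=%s,ou=Users,dc=example,dc=com")));
      }
    }
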
diff --git a/service/src/java/org/apache/hive/service/auth/ldap/SearchResultHandler.java b/service/src/java/org/apache/hive/service/auth/ldap/SearchResultHandler.java
index aac1160..5f0f3b6 100644
--- a/service/src/java/org/apache/hive/service/auth/ldap/SearchResultHandler.java
+++ b/service/src/java/org/apache/hive/service/auth/ldap/SearchResultHandler.java
@@ -147,7 +147,7 @@ public final class SearchResultHandler {
    * Implementations of this interface perform the actual work of processing 
each record,
    * but don't need to worry about exception handling, closing underlying data 
structures,
    * and combining results from several search requests.
-   * {@see SearchResultHandler}
+   * @see SearchResultHandler
    */
   public interface RecordProcessor {
 
diff --git a/service/src/java/org/apache/hive/service/auth/ldap/UserFilterFactory.java b/service/src/java/org/apache/hive/service/auth/ldap/UserFilterFactory.java
index cb00aa9..c3bcfd9 100644
--- a/service/src/java/org/apache/hive/service/auth/ldap/UserFilterFactory.java
+++ b/service/src/java/org/apache/hive/service/auth/ldap/UserFilterFactory.java
@@ -30,7 +30,7 @@ import org.slf4j.LoggerFactory;
  * <br>
  * The produced filter object filters out all users that are not on the 
provided in
  * Hive configuration list.
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERFILTER
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_USERFILTER
  */
 public final class UserFilterFactory implements FilterFactory {
 
diff --git a/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java 
b/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
index 56ee54c..199e902 100644
--- a/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
+++ b/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
@@ -31,7 +31,7 @@ public class CLIServiceUtils {
    * Convert a SQL search pattern into an equivalent Java Regex.
    *
    * @param pattern input which may contain '%' or '_' wildcard characters, or
-   * these characters escaped using {@link #getSearchStringEscape()}.
+   * these characters escaped using getSearchStringEscape().
    * @return replace %/_ with regex search characters, also handle escaped
    * characters.
    */
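
The wildcard translation described above (for getTables/getSchemas-style search patterns) is roughly the following; the real patternToRegex also honors the driver's configured escape character, so treat the '\' handling here as an illustrative assumption:

    import java.util.regex.Pattern;

    public class SqlPatternSketch {
      // '%' -> ".*", '_' -> ".", everything else (and escaped wildcards) stays literal.
      public static String toRegex(String sqlPattern) {
        StringBuilder regex = new StringBuilder();
        boolean escaped = false;
        for (char c : sqlPattern.toCharArray()) {
          if (escaped) {
            regex.append(Pattern.quote(String.valueOf(c)));
            escaped = false;
          } else if (c == '\\') {
            escaped = true;
          } else if (c == '%') {
            regex.append(".*");
          } else if (c == '_') {
            regex.append(".");
          } else {
            regex.append(Pattern.quote(String.valueOf(c)));
          }
        }
        return regex.toString();
      }
      // toRegex("db\\_1%") matches "db_1", "db_1_backup", ... but not "dbx1".
    }
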
diff --git a/service/src/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java b/service/src/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
index ae7be23..aca169e 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
@@ -36,9 +36,9 @@ import com.google.common.collect.Multimap;
 /**
  * ClassicTableTypeMapping.
  * Classic table type mapping :
- *  Managed Table ==> Table
- *  External Table ==> Table
- *  Virtual View ==> View
+ *  Managed Table ==&gt; Table
+ *  External Table ==&gt; Table
+ *  Virtual View ==&gt; View
  */
 public class ClassicTableTypeMapping implements TableTypeMapping {
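
The mapping listed in the class comment is a many-to-one lookup from Hive table types to the classic client-facing names. A toy version follows; the real class builds this with a Guava Multimap, and the constant names used here are illustrative:

    import java.util.HashMap;
    import java.util.Map;

    public class ClassicMappingSketch {
      private static final Map<String, String> HIVE_TO_CLIENT = new HashMap<>();
      static {
        HIVE_TO_CLIENT.put("MANAGED_TABLE", "TABLE");    // Managed Table  ==> Table
        HIVE_TO_CLIENT.put("EXTERNAL_TABLE", "TABLE");   // External Table ==> Table
        HIVE_TO_CLIENT.put("VIRTUAL_VIEW", "VIEW");      // Virtual View   ==> View
      }

      public static String mapToClientType(String hiveTypeName) {
        return HIVE_TO_CLIENT.getOrDefault(hiveTypeName, hiveTypeName);
      }
    }
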
 
diff --git a/service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java b/service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java
index eb8c712..3f2a89b 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java
@@ -35,7 +35,7 @@ public interface TableTypeMapping {
 
   /**
    * Map hive's table type name to client's table type
-   * @param clientTypeName
+   * @param hiveTypeName
    * @return
    */
   public String mapToClientType (String hiveTypeName);
diff --git a/service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java b/service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java
index 805934f..633bf89 100644
--- a/service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java
+++ b/service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java
@@ -30,12 +30,12 @@ import org.apache.hadoop.hive.metastore.RawStore;
  * in custom cleanup code to be called before this thread is GC-ed.
  * Currently cleans up the following:
  * 1. ThreadLocal RawStore object:
- * In case of an embedded metastore, HiveServer2 threads (foreground & 
background)
+ * In case of an embedded metastore, HiveServer2 threads (foreground &amp; 
background)
  * end up caching a ThreadLocal RawStore object. The ThreadLocal RawStore 
object has
- * an instance of PersistenceManagerFactory & PersistenceManager.
+ * an instance of PersistenceManagerFactory &amp; PersistenceManager.
  * The PersistenceManagerFactory keeps a cache of PersistenceManager objects,
  * which are only removed when PersistenceManager#close method is called.
- * HiveServer2 uses ExecutorService for managing thread pools for foreground & 
background threads.
+ * HiveServer2 uses ExecutorService for managing thread pools for foreground 
&amp; background threads.
  * ExecutorService unfortunately does not provide any hooks to be called,
  * when a thread from the pool is terminated.
  * As a solution, we're using this ThreadFactory to keep a cache of RawStore 
objects per thread.
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
index 3a827f7..76c97d5 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
@@ -35,7 +35,7 @@ import java.util.List;
  *
  *<p>
  *
- * Implementations can use {@link MetaStoreUtils#isExternalTable} to
+ * Implementations can use {@link 
org.apache.hadoop.hive.metastore.utils.MetaStoreUtils#isExternalTable} to
  * distinguish external tables from managed tables.
  */
 @InterfaceAudience.Public
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 8402ba5..7af9245 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -2708,7 +2708,6 @@ public interface IMetaStoreClient {
       throws MetaException, TException;
 
   /**
-   * @param revokePrivileges
    * @param authorizer
    * @param objToRefresh
    * @return true on success
@@ -2894,7 +2893,7 @@ public interface IMetaStoreClient {
 
   /**
    * Get a structure that details valid write ids.
-   * @param fullTableName full table name of format <db_name>.<table_name>
+   * @param fullTableName full table name of format 
&lt;db_name&gt;.&lt;table_name&gt;
    * @return list of valid write ids for the given table
    * @throws TException
    */
@@ -2902,7 +2901,7 @@ public interface IMetaStoreClient {
 
   /**
    * Get a structure that details valid write ids.
-   * @param fullTableName full table name of format <db_name>.<table_name>
+   * @param fullTableName full table name of format 
&lt;db_name&gt;.&lt;table_name&gt;
    * @param writeId The write id to get the corresponding txn
    * @return list of valid write ids for the given table
    * @throws TException
@@ -2911,7 +2910,7 @@ public interface IMetaStoreClient {
 
   /**
    * Get a structure that details valid write ids list for all tables read by 
current txn.
-   * @param tablesList list of tables (format: <db_name>.<table_name>) read 
from the current transaction
+   * @param tablesList list of tables (format: 
&lt;db_name&gt;.&lt;table_name&gt;) read from the current transaction
    *                   for which needs to populate the valid write ids
    * @param validTxnList snapshot of valid txns for the current txn
    * @return list of valid write ids for the given list of tables.
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
index ecd5996..6c17c86 100755
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -590,7 +590,7 @@ public class Warehouse {
    * pairs to create the Path for the partition directory
    *
    * @param db - parent database which is used to get the base location of the 
partition directory
-   * @param tableName - table name for the partitions
+   * @param table - table for the partitions
    * @param pm - Partition key value pairs
    * @return
    * @throws MetaException
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FilterUtils.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FilterUtils.java
index da70dbc..2ed314b 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FilterUtils.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FilterUtils.java
@@ -325,7 +325,6 @@ public class FilterUtils {
    * could improve performance when filtering partitions.
    * @param dbName the database name
    * @param tblName the table name contained in the database
-   * @return if the
    * @throws NoSuchObjectException if the database or table is filtered out
    */
   public static void checkDbAndTableFilters(boolean isFilterEnabled,
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index f4e0c41..d903a91 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -35,7 +35,7 @@ public interface AlterHandler extends Configurable {
 
   /**
    * @deprecated As of release 2.2.0. Replaced by {@link #alterTable(RawStore, 
Warehouse, String,
-   * String, String, Table, EnvironmentContext, IHMSHandler)}
+   * String, String, Table, EnvironmentContext, IHMSHandler, String)}
    *
    * handles alter table, the changes could be cascaded to partitions if 
applicable
    *
@@ -92,8 +92,8 @@ public interface AlterHandler extends Configurable {
           throws InvalidOperationException, MetaException;
 
   /**
-   * @deprecated As of release 2.2.0.  Replaced by {@link 
#alterPartition(RawStore, Warehouse, String,
-   * String, List, Partition, EnvironmentContext, IHMSHandler)}
+   * @deprecated As of release 2.2.0.  Replaced by {@link 
#alterPartitions(RawStore, Warehouse, String,
+   * String, String, List, EnvironmentContext, String, long, IHMSHandler)}
    *
    * handles alter partition
    *
@@ -151,7 +151,7 @@ public interface AlterHandler extends Configurable {
 
   /**
    * @deprecated As of release 3.0.0. Replaced by {@link 
#alterPartitions(RawStore, Warehouse, String,
-   * String, String, List, EnvironmentContext, IHMSHandler)}
+   * String, String, List, EnvironmentContext, String, long, IHMSHandler)}
    *
    * handles alter partitions
    *
@@ -201,4 +201,4 @@ public interface AlterHandler extends Configurable {
     EnvironmentContext environmentContext,  String writeIdList, long writeId,
     IHMSHandler handler)
       throws InvalidOperationException, InvalidObjectException, 
AlreadyExistsException, MetaException;
-}
\ No newline at end of file
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockHandler.java
index dd31226..da7478f 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockHandler.java
@@ -32,9 +32,9 @@ import java.util.concurrent.atomic.AtomicReference;
  * This is a lock handler implementation for the materializations rebuild.
  * It is lightweight: it does not persist any information to metastore db.
  * Its states are as follows:
- * 1) request lock -> 2) ACQUIRED -> 4) COMMIT_READY -> 6) release lock
- *                                -> 5) EXPIRED      ->
- *                 -> 3) NOT_ACQUIRED
+ * 1) request lock -&gt; 2) ACQUIRED -&gt; 4) COMMIT_READY -&gt; 6) release 
lock
+ *                                -&gt; 5) EXPIRED      -&gt;
+ *                 -&gt; 3) NOT_ACQUIRED
  * First, the rebuild operation will ACQUIRE the lock. If other rebuild
  * operation for the same operation is already running, we lock status
  * will be NOT_ACQUIRED.
@@ -107,7 +107,6 @@ public class MaterializationsRebuildLockHandler {
    * @param dbName the db name of the materialization
    * @param tableName the table name of the materialization
    * @param txnId the transaction id for the rebuild
-   * @throws MetaException
    */
   public boolean refreshLockResource(String dbName, String tableName, long 
txnId) {
     final ResourceLock prevResourceLock = 
locks.get(Warehouse.getQualifiedName(dbName, tableName));
@@ -127,7 +126,6 @@ public class MaterializationsRebuildLockHandler {
    * @param tableName the table name of the materialization
    * @param txnId the transaction id for the rebuild
    * @return true if the lock could be released properly, false otherwise
-   * @throws MetaException
    */
   public boolean unlockResource(String dbName, String tableName, long txnId) {
     final String fullyQualifiedName = Warehouse.getQualifiedName(dbName, 
tableName);
@@ -141,7 +139,6 @@ public class MaterializationsRebuildLockHandler {
   /**
    * Method that removes from the handler those locks that have expired.
    * @param timeout time after which we consider the locks to have expired
-   * @throws MetaException
    */
   public long cleanupResourceLocks(long timeout) {
     long removed = 0L;
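
Reading the escaped state diagram above as plain text: request the lock, land in ACQUIRED (or NOT_ACQUIRED if another rebuild of the same materialization already holds it), move to COMMIT_READY and release, with EXPIRED as the timeout branch. A hedged sketch of that lifecycle, not the handler's own code:

    public class RebuildLockFlowSketch {
      enum State { NOT_ACQUIRED, ACQUIRED, COMMIT_READY, EXPIRED }

      static String describe(State s) {
        switch (s) {
          case NOT_ACQUIRED: return "another rebuild already holds the lock";
          case ACQUIRED:     return "rebuild running; keep refreshing the lock";
          case EXPIRED:      return "lock timed out before commit; rebuild must give up";
          case COMMIT_READY: return "rebuild done; lock can be released";
          default:           return "unknown";
        }
      }
    }
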
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionIterable.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionIterable.java
index 2837ff4..5f02a40 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionIterable.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionIterable.java
@@ -29,10 +29,10 @@ import org.apache.hadoop.hive.metastore.api.Table;
 
 
 /**
- * PartitionIterable - effectively a lazy Iterable<Partition>
+ * PartitionIterable - effectively a lazy Iterable&lt;Partition&gt;
  * Sometimes, we have a need for iterating through a list of partitions,
  * but the list of partitions can be too big to fetch as a single object.
- * Thus, the goal of PartitionIterable is to act as an Iterable<Partition>
+ * Thus, the goal of PartitionIterable is to act as an 
Iterable&lt;Partition&gt;
  * while lazily fetching each relevant partition, one after the other as
  * independent metadata calls.
  * It is very likely that any calls to PartitionIterable are going to result
@@ -133,7 +133,7 @@ public class PartitionIterable implements 
Iterable<Partition> {
   /**
    * Dummy constructor, which simply acts as an iterator on an already-present
    * list of partitions, allows for easy drop-in replacement for other methods
-   * that already have a List<Partition>
+   * that already have a List&lt;Partition&gt;
    */
   public PartitionIterable(Collection<Partition> ptnsProvided) {
     this.currType = Type.LIST_PROVIDED;
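
A small usage sketch of the lazy iterable described above, using only the list-backed constructor visible in this hunk (the batched, metastore-fetching constructors are not shown here):

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.PartitionIterable;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class PartitionIterableSketch {
      // Works the same whether the Iterable is list-backed or fetched lazily in batches.
      static long count(Iterable<Partition> partitions) {
        long n = 0;
        for (Partition ignored : partitions) {
          n++;                      // in the lazy case, each batch is fetched on demand
        }
        return n;
      }

      public static void main(String[] args) {
        Partition p = new Partition();
        System.out.println(count(new PartitionIterable(Arrays.asList(p, p))));   // 2
      }
    }
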
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 03a116a..8c1ab73 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -280,7 +280,6 @@ public interface RawStore extends Configurable {
    * @param dbName database name.
    * @param tableName table name.
    * @param part_vals partition values for this table.
-   * @param txnId transaction id of the calling transaction
    * @param writeIdList string format of valid writeId transaction list
    * @return the partition.
    * @throws MetaException error reading from RDBMS.
@@ -517,9 +516,8 @@ public interface RawStore extends Configurable {
    * @param new_parts list of new partitions.  The order must match the old 
partitions described in
    *                  part_vals_list.  Each of these should be a complete copy 
of the new
    *                  partition, not just the pieces to update.
-   * @param txnId transaction id of the transaction that called this method.
-   * @param writeIdList valid write id list of the transaction on the current 
table
-   * @param writeid write id of the transaction for the table
+   * @param writeId write id of the transaction for the table
+   * @param queryValidWriteIds valid write id list of the transaction on the 
current table
    * @return
    * @throws InvalidObjectException One of the indicated partitions does not 
exist.
    * @throws MetaException error accessing the RDBMS.
@@ -908,7 +906,6 @@ public interface RawStore extends Configurable {
    * @throws MetaException error accessing the RDBMS.
    * @throws InvalidObjectException the stats object is invalid
    * @throws InvalidInputException unable to record the stats for the table
-   * @throws TException
    */
   Map<String, String> updatePartitionColumnStatistics(ColumnStatistics 
statsObj,
      List<String> partVals, String validWriteIds, long writeId)
@@ -936,7 +933,6 @@ public interface RawStore extends Configurable {
    * @param dbName name of the database, defaults to current database
    * @param tableName name of the table
    * @param colName names of the columns for which statistics is requested
-   * @param txnId transaction id of the calling transaction
    * @param writeIdList string format of valid writeId transaction list
    * @return Relevant column statistics for the column for the given table
    * @throws NoSuchObjectException No such table
@@ -970,7 +966,6 @@ public interface RawStore extends Configurable {
    * @param tblName table name.
    * @param partNames list of partition names.  These are names so must be 
key1=val1[/key2=val2...]
    * @param colNames list of columns to get stats for
-   * @param txnId transaction id of the calling transaction
    * @param writeIdList string format of valid writeId transaction list
    * @return list of statistics objects
    * @throws MetaException error accessing the RDBMS
@@ -1233,7 +1228,6 @@ public interface RawStore extends Configurable {
    * @param partNames list of partition names.  These are the names of the 
partitions, not
    *                  values.
    * @param colNames list of column names
-   * @param txnId transaction id of the calling transaction
    * @param writeIdList string format of valid writeId transaction list
    * @return aggregated stats
    * @throws MetaException error accessing RDBMS
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/OpenTxnEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/OpenTxnEvent.java
index 547c43e..d935ed1 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/OpenTxnEvent.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/OpenTxnEvent.java
@@ -43,7 +43,7 @@ public class OpenTxnEvent extends ListenerEvent {
   }
 
   /**
-   * @return List<Long> txnIds
+   * @return List&lt;Long&gt; txnIds
    */
   public List<Long> getTxnIds() {
     return txnIds;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
index ea70503..ba45f39 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
@@ -51,8 +51,8 @@ public class CompactionInfo implements 
Comparable<CompactionInfo> {
   public String properties;
   public boolean tooManyAborts = false;
   /**
+   * The highest write id that the compaction job will pay attention to.
    * {@code 0} means it wasn't set (e.g. in case of upgrades, since 
ResultSet.getLong() will return 0 if field is NULL) 
-   * See {@link TxnStore#setCompactionHighestWriteId(CompactionInfo, long)} 
for precise definition.
    * See also {@link 
TxnUtils#createValidCompactWriteIdList(TableValidWriteIds)} and
    * {@link ValidCompactorWriteIdList#highWatermark}.
    */
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index cd77b4e..da38a6b 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@ -124,7 +124,7 @@ public class TxnUtils {
    * Note, users are responsible for using the correct TxnManager. We do not 
look at
    * SessionState.get().getTxnMgr().supportsAcid() here
    * Should produce the same result as
-   * {@link 
org.apache.hadoop.hive.ql.io.AcidUtils#isTransactionalTable(org.apache.hadoop.hive.ql.metadata.Table)}.
+   * org.apache.hadoop.hive.ql.io.AcidUtils#isTransactionalTable.
    * @return true if table is a transactional table, false otherwise
    */
   public static boolean isTransactionalTable(Table table) {
@@ -147,7 +147,7 @@ public class TxnUtils {
 
   /**
    * Should produce the same result as
-   * {@link 
org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable(org.apache.hadoop.hive.ql.metadata.Table)}.
+   * org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable.
    */
   public static boolean isAcidTable(Table table) {
     return TxnUtils.isTransactionalTable(table) &&
@@ -156,7 +156,7 @@ public class TxnUtils {
   }
 
   /**
-   * Should produce the result as <dbName>.<tableName>.
+   * Should produce the result as &lt;dbName&gt;.&lt;tableName&gt;.
    */
   public static String getFullTableName(String dbName, String tableName) {
     return dbName.toLowerCase() + "." + tableName.toLowerCase();
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/io/FileMetadataCache.java b/storage-api/src/java/org/apache/hadoop/hive/common/io/FileMetadataCache.java
index e4aa888..d7de361 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/io/FileMetadataCache.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/io/FileMetadataCache.java
@@ -57,8 +57,6 @@ public interface FileMetadataCache {
   /**
    * Puts the metadata for a given file (e.g. a footer buffer into cache).
    * @param fileKey The file key.
-   * @param length The footer length.
-   * @param is The stream to read the footer from.
    * @return The buffer or buffers representing the cached footer.
    *         The caller must decref this buffer when done.
    */
diff --git a/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java b/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java
index a32aa62..fa7e079 100644
--- a/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java
+++ b/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java
@@ -63,12 +63,12 @@ import com.google.common.annotations.VisibleForTesting;
  * To bind to the correct metastore, HiveConf object has to be created from 
hive-site.xml or HIVE_CONF_DIR.
  * If hive conf is manually created, metastore uri has to be set correctly. If 
hive conf object is not specified,
  * "thrift://localhost:9083" will be used as default.
- * <br/><br/>
+ * <br><br>
  * NOTE: The streaming connection APIs and record writer APIs are not 
thread-safe. Streaming connection creation,
  * begin/commit/abort transactions, write and close has to be called in the 
same thread. If close() or
  * abortTransaction() has to be triggered from a separate thread it has to be 
co-ordinated via external variables or
  * synchronization mechanism
- * <br/><br/>
+ * <br><br>
  * Example usage:
  * <pre>{@code
  * // create delimited record writer whose schema exactly matches table schema
diff --git a/streaming/src/java/org/apache/hive/streaming/StreamingTransaction.java b/streaming/src/java/org/apache/hive/streaming/StreamingTransaction.java
index c0ee034..01c1164 100644
--- a/streaming/src/java/org/apache/hive/streaming/StreamingTransaction.java
+++ b/streaming/src/java/org/apache/hive/streaming/StreamingTransaction.java
@@ -119,7 +119,7 @@ public interface StreamingTransaction {
   Set<String> getPartitions();
 
   /**
-   * @return get the paris for transaction ids <--> write ids
+   * @return get the paris for transaction ids &lt;--&gt; write ids
    */
   List<TxnToWriteId> getTxnToWriteIds();
 }
diff --git a/testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java b/testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java
index cd6cce7..005119a 100644
--- a/testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java
+++ b/testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java
@@ -34,7 +34,7 @@ public class HiveBurnInClient {
   /**
    * Creates 2 tables to query from
    *
-   * @param num
+   * @param con
    */
   public static void createTables(Connection con) throws SQLException {
     Statement stmt = con.createStatement();
