HIVE-18448: Drop Support For Indexes From Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Signed-off-by: Zoltan Haindrich <k...@rxd.hu>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b0d3cb45
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b0d3cb45
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b0d3cb45

Branch: refs/heads/master
Commit: b0d3cb4527e4855c8544349e8f9023a0284a3160
Parents: c2c188e
Author: Zoltan Haindrich <k...@rxd.hu>
Authored: Wed Feb 14 09:33:38 2018 +0100
Committer: Zoltan Haindrich <k...@rxd.hu>
Committed: Wed Feb 14 09:33:38 2018 +0100

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   38 +-
 .../SemanticAnalysis/HCatSemanticAnalyzer.java  |   11 -
 ...estDDLWithRemoteMetastoreSecondNamenode.java |   54 -
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |   10 -
 .../java/org/apache/hadoop/hive/ql/Driver.java  |    1 -
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |    3 -
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  248 +---
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |   14 +-
 .../hadoop/hive/ql/exec/OperatorUtils.java      |   31 -
 .../apache/hadoop/hive/ql/exec/TaskFactory.java |    4 -
 .../apache/hadoop/hive/ql/exec/Utilities.java   |    5 -
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |    5 -
 .../hive/ql/index/AbstractIndexHandler.java     |   58 -
 .../hive/ql/index/AggregateIndexHandler.java    |  159 ---
 .../apache/hadoop/hive/ql/index/HiveIndex.java  |   75 --
 .../hadoop/hive/ql/index/HiveIndexHandler.java  |  142 --
 .../hive/ql/index/HiveIndexQueryContext.java    |  101 --
 .../hadoop/hive/ql/index/HiveIndexResult.java   |  209 ---
 .../hive/ql/index/HiveIndexedInputFormat.java   |  162 ---
 .../hive/ql/index/IndexMetadataChangeTask.java  |  100 --
 .../hive/ql/index/IndexMetadataChangeWork.java  |   67 -
 .../hive/ql/index/IndexPredicateAnalyzer.java   |    3 +
 .../hadoop/hive/ql/index/IndexResult.java       |   25 -
 .../hive/ql/index/IndexSearchCondition.java     |    4 +-
 .../hadoop/hive/ql/index/SplitFilter.java       |  125 --
 .../hive/ql/index/TableBasedIndexHandler.java   |  155 ---
 .../ql/index/bitmap/BitmapIndexHandler.java     |  312 -----
 .../hive/ql/index/bitmap/BitmapInnerQuery.java  |   70 -
 .../hive/ql/index/bitmap/BitmapObjectInput.java |  182 ---
 .../ql/index/bitmap/BitmapObjectOutput.java     |  126 --
 .../hive/ql/index/bitmap/BitmapOuterQuery.java  |   84 --
 .../hive/ql/index/bitmap/BitmapQuery.java       |   29 -
 .../ql/index/compact/CompactIndexHandler.java   |  408 ------
 .../compact/HiveCompactIndexInputFormat.java    |   33 -
 .../hadoop/hive/ql/io/HiveInputFormat.java      |   19 +
 .../hadoop/hive/ql/io/orc/ExternalCache.java    |   25 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  324 +----
 .../hadoop/hive/ql/metadata/HiveUtils.java      |   36 -
 .../formatting/MetaDataFormatUtils.java         |   53 +-
 .../hadoop/hive/ql/optimizer/IndexUtils.java    |  249 ----
 .../hadoop/hive/ql/optimizer/Optimizer.java     |   10 +-
 .../ql/optimizer/QueryPlanPostProcessor.java    |    2 -
 .../ql/optimizer/index/RewriteCanApplyCtx.java  |  265 ----
 .../index/RewriteCanApplyProcFactory.java       |  116 --
 .../ql/optimizer/index/RewriteGBUsingIndex.java |  359 -----
 .../index/RewriteParseContextGenerator.java     |  122 --
 .../RewriteQueryUsingAggregateIndexCtx.java     |  325 -----
 .../optimizer/physical/IndexWhereResolver.java  |   42 -
 .../optimizer/physical/PhysicalOptimizer.java   |    3 -
 .../physical/index/IndexWhereProcCtx.java       |   48 -
 .../physical/index/IndexWhereProcessor.java     |  255 ----
 .../index/IndexWhereTaskDispatcher.java         |  175 ---
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |  289 +---
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |  105 --
 .../hadoop/hive/ql/parse/IndexUpdater.java      |  153 ---
 .../hive/ql/parse/LoadSemanticAnalyzer.java     |   20 +-
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |   10 -
 .../hadoop/hive/ql/parse/TaskCompiler.java      |   15 -
 .../hadoop/hive/ql/plan/AlterIndexDesc.java     |  132 --
 .../hadoop/hive/ql/plan/CreateIndexDesc.java    |  252 ----
 .../org/apache/hadoop/hive/ql/plan/DDLWork.java |   86 --
 .../hadoop/hive/ql/plan/DropIndexDesc.java      |   75 --
 .../hadoop/hive/ql/plan/HiveOperation.java      |    5 -
 .../org/apache/hadoop/hive/ql/plan/MapWork.java |   14 -
 .../hadoop/hive/ql/plan/ShowIndexesDesc.java    |   75 --
 .../hadoop/hive/ql/ppd/OpProcFactory.java       |    5 +-
 .../ql/security/authorization/Privilege.java    |    3 -
 .../authorization/PrivilegeRegistry.java        |    1 -
 .../security/authorization/PrivilegeType.java   |    1 -
 .../StorageBasedAuthorizationProvider.java      |    4 -
 .../AbstractGenericUDFEWAHBitmapBop.java        |  146 --
 .../ql/udf/generic/GenericUDAFEWAHBitmap.java   |  193 ---
 .../ql/udf/generic/GenericUDFEWAHBitmapAnd.java |   42 -
 .../udf/generic/GenericUDFEWAHBitmapEmpty.java  |  100 --
 .../ql/udf/generic/GenericUDFEWAHBitmapOr.java  |   42 -
 .../hadoop/hive/ql/index/MockIndexResult.java   |   38 -
 .../hadoop/hive/ql/index/MockInputFile.java     |    3 +-
 .../hive/ql/index/SplitFilterTestCase.java      |  153 ---
 .../ql/index/TestHiveInputSplitComparator.java  |    3 +-
 .../hadoop/hive/ql/index/TestIndexType.java     |   34 -
 .../hadoop/hive/ql/index/TestSplitFilter.java   |  296 ----
 .../hadoop/hive/ql/metadata/TestHive.java       |  122 --
 .../parse/authorization/TestPrivilegesV1.java   |    1 -
 .../alter_concatenate_indexed_table.q           |   18 -
 .../clientnegative/authorization_create_index.q |    7 -
 .../clientnegative/authorization_drop_index.q   |    8 -
 .../authorization_invalid_priv_v2.q             |    2 +-
 .../clientnegative/authorization_uri_index.q    |   14 -
 .../test/queries/clientnegative/bad_indextype.q |    1 -
 .../queries/clientnegative/drop_index_failure.q |    3 -
 .../clientnegative/index_bitmap_no_map_aggr.q   |    7 -
 .../clientnegative/index_compact_entry_limit.q  |   13 -
 .../clientnegative/index_compact_size_limit.q   |   14 -
 .../queries/clientnegative/merge_negative_1.q   |    3 -
 .../clientnegative/show_create_table_index.q    |    6 -
 .../queries/clientnegative/temp_table_index.q   |    2 -
 .../truncate_column_indexed_table.q             |    9 -
 .../alter_concatenate_indexed_table.q           |   51 -
 .../test/queries/clientpositive/alter_index.q   |   11 -
 .../clientpositive/authorization_index.q        |   13 -
 .../test/queries/clientpositive/database_drop.q |   38 +-
 ql/src/test/queries/clientpositive/drop_index.q |    2 -
 .../drop_index_removes_partition_dirs.q         |   22 -
 .../clientpositive/drop_table_with_index.q      |   35 -
 .../queries/clientpositive/escape_comments.q    |    2 -
 ql/src/test/queries/clientpositive/index_auth.q |   20 -
 ql/src/test/queries/clientpositive/index_auto.q |   31 -
 .../queries/clientpositive/index_auto_empty.q   |   26 -
 .../clientpositive/index_auto_file_format.q     |   23 -
 .../clientpositive/index_auto_mult_tables.q     |   25 -
 .../index_auto_mult_tables_compact.q            |   26 -
 .../clientpositive/index_auto_multiple.q        |   20 -
 .../clientpositive/index_auto_partitioned.q     |   17 -
 .../clientpositive/index_auto_self_join.q       |   19 -
 .../queries/clientpositive/index_auto_unused.q  |   64 -
 .../queries/clientpositive/index_auto_update.q  |   29 -
 .../test/queries/clientpositive/index_bitmap.q  |   52 -
 .../test/queries/clientpositive/index_bitmap1.q |   22 -
 .../test/queries/clientpositive/index_bitmap2.q |   39 -
 .../test/queries/clientpositive/index_bitmap3.q |   52 -
 .../queries/clientpositive/index_bitmap_auto.q  |   57 -
 .../index_bitmap_auto_partitioned.q             |   17 -
 .../clientpositive/index_bitmap_compression.q   |   18 -
 .../queries/clientpositive/index_bitmap_rc.q    |   58 -
 .../test/queries/clientpositive/index_compact.q |   46 -
 .../queries/clientpositive/index_compact_1.q    |   20 -
 .../queries/clientpositive/index_compact_2.q    |   50 -
 .../queries/clientpositive/index_compact_3.q    |   23 -
 .../index_compact_binary_search.q               |  132 --
 .../queries/clientpositive/index_compression.q  |   18 -
 .../queries/clientpositive/index_creation.q     |   54 -
 .../test/queries/clientpositive/index_in_db.q   |   16 -
 .../test/queries/clientpositive/index_serde.q   |   52 -
 .../queries/clientpositive/index_skewtable.q    |   23 -
 .../test/queries/clientpositive/index_stale.q   |   23 -
 .../clientpositive/index_stale_partitioned.q    |   29 -
 .../clientpositive/show_indexes_edge_cases.q    |   28 -
 .../clientpositive/show_indexes_syntax.q        |   24 -
 .../special_character_in_tabnames_2.q           |   23 -
 .../queries/clientpositive/udf_bitmap_and.q     |   14 -
 .../queries/clientpositive/udf_bitmap_empty.q   |    5 -
 .../test/queries/clientpositive/udf_bitmap_or.q |   14 -
 .../queries/clientpositive/unicode_comments.q   |    2 -
 ql/src/test/queries/clientpositive/union_view.q |   10 -
 .../authorization_invalid_priv_v2.q.out         |    4 +-
 .../index_bitmap_no_map_aggr.q.out              |   20 -
 .../index_compact_entry_limit.q.out             |   37 -
 .../index_compact_size_limit.q.out              |   37 -
 .../beeline/escape_comments.q.out               |   15 -
 .../results/clientpositive/database_drop.q.out  |  174 ---
 .../clientpositive/escape_comments.q.out        |   16 -
 .../results/clientpositive/index_auth.q.out     |   79 --
 .../results/clientpositive/index_auto.q.out     |  255 ----
 .../clientpositive/index_auto_empty.q.out       |  101 --
 .../clientpositive/index_auto_file_format.q.out |  256 ----
 .../clientpositive/index_auto_mult_tables.q.out |  438 ------
 .../index_auto_mult_tables_compact.q.out        |  485 -------
 .../clientpositive/index_auto_multiple.q.out    |  164 ---
 .../clientpositive/index_auto_partitioned.q.out |  172 ---
 .../clientpositive/index_auto_self_join.q.out   |  295 ----
 .../clientpositive/index_auto_unused.q.out      |  388 ------
 .../clientpositive/index_auto_update.q.out      |  353 -----
 .../results/clientpositive/index_bitmap.q.out   |  291 ----
 .../results/clientpositive/index_bitmap1.q.out  |   75 --
 .../results/clientpositive/index_bitmap2.q.out  |  138 --
 .../results/clientpositive/index_bitmap3.q.out  | 1262 -----------------
 .../clientpositive/index_bitmap_auto.q.out      | 1273 ------------------
 .../index_bitmap_auto_partitioned.q.out         |  150 ---
 .../index_bitmap_compression.q.out              |  133 --
 .../clientpositive/index_bitmap_rc.q.out        |  349 -----
 .../results/clientpositive/index_compact.q.out  |  271 ----
 .../clientpositive/index_compact_1.q.out        |   70 -
 .../clientpositive/index_compact_2.q.out        |  317 -----
 .../clientpositive/index_compact_3.q.out        |   84 --
 .../index_compact_binary_search.q.out           |  473 -------
 .../clientpositive/index_compression.q.out      |  158 ---
 .../results/clientpositive/index_creation.q.out |  321 -----
 .../results/clientpositive/index_in_db.q.out    |   57 -
 .../results/clientpositive/index_serde.q.out    |  242 ----
 .../clientpositive/index_skewtable.q.out        |  204 ---
 .../results/clientpositive/index_stale.q.out    |  106 --
 .../index_stale_partitioned.q.out               |  115 --
 .../results/clientpositive/show_functions.q.out |    4 -
 .../clientpositive/spark/union_view.q.out       |   23 +-
 .../special_character_in_tabnames_2.q.out       |  165 ---
 .../clientpositive/unicode_comments.q.out       |   15 -
 .../results/clientpositive/union_view.q.out     |  317 +----
 187 files changed, 124 insertions(+), 19179 deletions(-)
----------------------------------------------------------------------
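
For readers unfamiliar with the feature: the DDL family being dropped is
CREATE/ALTER/DROP INDEX. A minimal sketch of statements that are no longer
accepted after this change (table, column and index names are illustrative;
'COMPACT' is one of the built-in handlers deleted below):

    CREATE INDEX src_key_idx ON TABLE src (key)
      AS 'COMPACT' WITH DEFERRED REBUILD;
    ALTER INDEX src_key_idx ON src REBUILD;
    DROP INDEX src_key_idx ON src;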


http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index ce96bff..f3980b6 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -597,7 +597,7 @@ public class HiveConf extends Configuration {
         "When hive.exec.mode.local.auto is true, the number of tasks should 
less than this for local mode."),
 
     DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true,
-        "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a 
non-existent table/view/index/function"),
+        "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a 
non-existent table/view/function"),
 
     HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the 
mapjoin hint"),
 
@@ -1616,10 +1616,8 @@ public class HiveConf extends Configuration {
         "Whether to provide the row offset virtual column"),
 
     // Optimizer
-    HIVEOPTINDEXFILTER("hive.optimize.index.filter", false,
-        "Whether to enable automatic use of indexes"),
-    HIVEINDEXAUTOUPDATE("hive.optimize.index.autoupdate", false,
-        "Whether to update stale indexes automatically"),
+    HIVEOPTINDEXFILTER("hive.optimize.index.filter", false, "Whether to enable automatic use of indexes"),
+
     HIVEOPTPPD("hive.optimize.ppd", true,
         "Whether to enable predicate pushdown"),
     HIVEOPTPPD_WINDOWING("hive.optimize.ppd.windowing", true,
@@ -1757,18 +1755,6 @@ public class HiveConf extends Configuration {
         "If the number of references to a CTE clause exceeds this threshold, 
Hive will materialize it\n" +
         "before executing the main query block. -1 will disable this 
feature."),
 
-    // Indexes
-    HIVEOPTINDEXFILTER_COMPACT_MINSIZE("hive.optimize.index.filter.compact.minsize", (long) 5 * 1024 * 1024 * 1024,
-        "Minimum size (in bytes) of the inputs on which a compact index is automatically used."), // 5G
-    HIVEOPTINDEXFILTER_COMPACT_MAXSIZE("hive.optimize.index.filter.compact.maxsize", (long) -1,
-        "Maximum size (in bytes) of the inputs on which a compact index is automatically used.  A negative number is equivalent to infinity."), // infinity
-    HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES("hive.index.compact.query.max.entries", (long) 10000000,
-        "The maximum number of index entries to read during a query that uses the compact index. Negative value is equivalent to infinity."), // 10M
-    HIVE_INDEX_COMPACT_QUERY_MAX_SIZE("hive.index.compact.query.max.size", (long) 10 * 1024 * 1024 * 1024,
-        "The maximum number of bytes that a query using the compact index can read. Negative value is equivalent to infinity."), // 10G
-    HIVE_INDEX_COMPACT_BINARY_SEARCH("hive.index.compact.binary.search", true,
-        "Whether or not to use a binary search to find the entries in an index table that match the filter, where possible"),
-
     // Statistics
     HIVE_STATS_ESTIMATE_STATS("hive.stats.estimate", true,
         "Estimate statistics in absence of statistics."),
@@ -2139,9 +2125,6 @@ public class HiveConf extends Configuration {
     // For har files
     HIVEARCHIVEENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"),
 
-    HIVEOPTGBYUSINGINDEX("hive.optimize.index.groupby", false,
-        "Whether to enable optimization of group-by queries using Aggregate 
indexes."),
-
     HIVEFETCHTASKCONVERSION("hive.fetch.task.conversion", "more", new StringSet("none", "minimal", "more"),
         "Some select queries can be converted to single FETCH task minimizing latency.\n" +
         "Currently the query should be single sourced not having any subquery and should not have\n" +
@@ -2265,12 +2248,6 @@ public class HiveConf extends Configuration {
     HIVE_ERROR_ON_EMPTY_PARTITION("hive.error.on.empty.partition", false,
         "Whether to throw an exception if dynamic partition insert generates 
empty results."),
 
-    HIVE_INDEX_COMPACT_FILE("hive.index.compact.file", "", "internal variable"),
-    HIVE_INDEX_BLOCKFILTER_FILE("hive.index.blockfilter.file", "", "internal variable"),
-    HIVE_INDEX_IGNORE_HDFS_LOC("hive.index.compact.file.ignore.hdfs", false,
-        "When true the HDFS location stored in the index file will be ignored at runtime.\n" +
-        "If the data got moved or the name of the cluster got changed, the index data should still be usable."),
-
     HIVE_EXIM_URI_SCHEME_WL("hive.exim.uri.scheme.whitelist", "hdfs,pfile,file,s3,s3a",
         "A comma separated list of acceptable URI schemes for import and export."),
     // temporary variable for testing. This is added just to turn off this feature in case of a bug in
@@ -2289,12 +2266,6 @@ public class HiveConf extends Configuration {
     HIVE_REWORK_MAPREDWORK("hive.rework.mapredwork", false,
         "should rework the mapred work or not.\n" +
         "This is first introduced by SymlinkTextInputFormat to replace symlink 
files with real paths at compile time."),
-    HIVE_CONCATENATE_CHECK_INDEX ("hive.exec.concatenate.check.index", true,
-        "If this is set to true, Hive will throw error when doing\n" +
-        "'alter table tbl_name [partSpec] concatenate' on a table/partition\n" 
+
-        "that has indexes on it. The reason the user want to set this to 
true\n" +
-        "is because it can help user to avoid handling all index drop, 
recreation,\n" +
-        "rebuild work. This is very helpful for tables with thousands of 
partitions."),
     HIVE_IO_EXCEPTION_HANDLERS("hive.io.exception.handlers", "",
         "A list of io exception handler class names. This is used\n" +
         "to construct a list exception handlers to handle exceptions thrown\n" 
+
@@ -3073,7 +3044,7 @@ public class HiveConf extends Configuration {
         "hive.tez.bucket.pruning", false,
          "When pruning is enabled, filters on bucket columns will be processed 
by \n" +
          "filtering the splits against a bitset of included buckets. This 
needs predicates \n"+
-         "produced by hive.optimize.ppd and hive.optimize.index.filters."),
+            "produced by hive.optimize.ppd and hive.optimize.index.filters."),
     TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT(
         "hive.tez.bucket.pruning.compat", true,
         "When pruning is enabled, handle possibly broken inserts due to 
negative hashcodes.\n" +
@@ -4616,7 +4587,6 @@ public class HiveConf extends Configuration {
     ConfVars.HIVE_CHECK_CROSS_PRODUCT.varname,
     ConfVars.HIVE_CLI_TEZ_SESSION_ASYNC.varname,
     ConfVars.HIVE_COMPAT.varname,
-    ConfVars.HIVE_CONCATENATE_CHECK_INDEX.varname,
     ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY.varname,
     ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION.varname,
     ConfVars.HIVE_EXECUTION_ENGINE.varname,
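
One practical note on the hunk above: hive.optimize.index.filter survives,
while the auto-update and compact-index tuning knobs are removed. Scripts that
still set the removed properties, roughly as sketched below, need cleanup;
whether an unknown hive.* key is rejected depends on hive.conf.validation, so
the failure mode is an assumption here:

    -- still a recognized parameter after this patch
    SET hive.optimize.index.filter=true;

    -- removed by this patch, no longer defined in HiveConf
    SET hive.optimize.index.autoupdate=true;
    SET hive.optimize.index.groupby=true;
    SET hive.index.compact.query.max.entries=10000000;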

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
index bec1f26..8105e8b 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
@@ -90,12 +90,6 @@ public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase {
     case HiveParser.TOK_DESCDATABASE:
     case HiveParser.TOK_ALTERDATABASE_PROPERTIES:
 
-      // Index DDL
-    case HiveParser.TOK_ALTERINDEX_PROPERTIES:
-    case HiveParser.TOK_CREATEINDEX:
-    case HiveParser.TOK_DROPINDEX:
-    case HiveParser.TOK_SHOWINDEXES:
-
       // View DDL
       // "alter view add partition" does not work because of the nature of 
implementation
       // of the DDL in hive. Hive will internally invoke another Driver on the 
select statement,
@@ -174,11 +168,6 @@ public class HCatSemanticAnalyzer extends 
HCatSemanticAnalyzerBase {
       case HiveParser.TOK_DESCDATABASE:
       case HiveParser.TOK_ALTERDATABASE_PROPERTIES:
 
-        // Index DDL
-      case HiveParser.TOK_ALTERINDEX_PROPERTIES:
-      case HiveParser.TOK_CREATEINDEX:
-      case HiveParser.TOK_DROPINDEX:
-      case HiveParser.TOK_SHOWINDEXES:
         break;
 
         // View DDL

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
index 63a7313..de33833 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
 import org.apache.hadoop.hive.ql.metadata.*;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
@@ -240,24 +239,6 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
     return table;
   }
 
-  private void createIndexAndCheck(Table table, String indexName, String indexLocation) throws Exception {
-    executeQuery("CREATE INDEX " + indexName + " ON TABLE " + table.getTableName()
-            + " (col1) AS 'COMPACT' WITH DEFERRED REBUILD "
-            + buildLocationClause(indexLocation));
-    Index index = db.getIndex(table.getTableName(), indexName);
-    assertNotNull("Index object is expected for " + indexName , index);
-    String location = index.getSd().getLocation();
-    if (indexLocation != null) {
-      assertEquals("Index should be located in the second filesystem",
-              fs2.makeQualified(new Path(indexLocation)).toString(), location);
-    }
-    else {
-      // Since warehouse path is non-qualified the index should be located on second filesystem
-      assertEquals("Index should be located in the second filesystem",
-              fs2.getUri().getScheme(), new URI(location).getScheme());
-    }
-  }
-
   private void createDatabaseAndCheck(String databaseName, String databaseLocation) throws Exception {
     executeQuery("CREATE DATABASE " + databaseName + buildLocationClause(databaseLocation));
     Database database = db.getDatabase(databaseName);
@@ -274,41 +255,6 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
     }
   }
 
-  public void testCreateTableWithIndexAndPartitionsNonDefaultNameNode() throws Exception {
-    assertTrue("Test suite should be initialied", isInitialized );
-    final String tableLocation = tmppathFs2 + "/" + Table1Name;
-    final String table5Location = tmppathFs2 + "/" + Table5Name;
-    final String indexLocation = tmppathFs2 + "/" + Index1Name;
-    final String partition3Location = fs.makeQualified(new Path(tmppath + "/p3")).toString();
-
-    // Create table with absolute non-qualified path
-    Table table1 = createTableAndCheck(Table1Name, tableLocation);
-
-    // Create table without location
-    createTableAndCheck(Table2Name, null);
-
-    // Add partition without location
-    addPartitionAndCheck(table1, "p", "p1", null);
-
-    // Add partition with absolute location
-    addPartitionAndCheck(table1, "p", "p2", tableLocation + "/p2");
-
-    // Add partition with qualified location in default fs
-    addPartitionAndCheck(table1, "p", "p3", partition3Location);
-
-    // Create index with absolute non-qualified path
-    createIndexAndCheck(table1, Index1Name, indexLocation);
-
-    // Create index with absolute non-qualified path
-    createIndexAndCheck(table1, Index2Name, null);
-
-    // Create table like Table1Name absolute non-qualified path
-    createTableAndCheck(table1, Table5Name, table5Location);
-
-    // Create table without location
-    createTableAndCheck(table1, Table6Name, null);
-  }
-
   public void testAlterPartitionSetLocationNonDefaultNameNode() throws Exception {
     assertTrue("Test suite should have been initialized", isInitialized);
     String tableLocation = tmppathFs2 + "/" + "test_set_part_loc";
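
The deleted helper above spells out the concrete CREATE INDEX syntax the test
exercised; reconstructed from the removed string concatenation, the statement
looked roughly like this (index, table and location values are placeholders):

    CREATE INDEX index1 ON TABLE table1 (col1)
      AS 'COMPACT' WITH DEFERRED REBUILD
      LOCATION '/tmp/index1';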

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index fcce531..6cd7a13 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -95,12 +95,10 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hive.druid.MiniDruidCluster;
-import org.apache.hive.testutils.HiveTestEnvSetup;
 import org.apache.hadoop.hive.llap.LlapItUtils;
 import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster;
 import org.apache.hadoop.hive.llap.io.api.LlapProxy;
 import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.Task;
@@ -966,14 +964,6 @@ public class QTestUtil {
             continue;
           }
           db.dropTable(dbName, tblName, true, true, fsType == FsType.encrypted_hdfs);
-        } else {
-          // this table is defined in srcTables, drop all indexes on it
-         List<Index> indexes = db.getIndexes(dbName, tblName, (short)-1);
-          if (indexes != null && indexes.size() > 0) {
-            for (Index index : indexes) {
-              db.dropIndex(dbName, tblName, index.getIndexName(), true, true);
-            }
-          }
         }
       }
       if (!DEFAULT_DATABASE_NAME.equals(dbName)) {

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 23b209e..d00e639 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -764,7 +764,6 @@ public class Driver implements IDriver {
       case SHOWTABLES:
       case SHOWCOLUMNS:
       case SHOWFUNCTIONS:
-      case SHOWINDEXES:
       case SHOWPARTITIONS:
       case SHOWLOCKS:
       case SHOWVIEWS:

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java 
b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 39a613c..883dcda 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -62,7 +62,6 @@ public enum ErrorMsg {
   //========================== 10000 range starts here 
========================//
   INVALID_TABLE(10001, "Table not found", "42S02"),
   INVALID_COLUMN(10002, "Invalid column reference"),
-  INVALID_INDEX(10003, "Invalid index"),
   INVALID_TABLE_OR_COLUMN(10004, "Invalid table alias or column reference"),
   AMBIGUOUS_TABLE_OR_COLUMN(10005, "Ambiguous table alias or column 
reference"),
   INVALID_PARTITION(10006, "Partition not found"),
@@ -326,7 +325,6 @@ public enum ErrorMsg {
   TABLES_INCOMPATIBLE_SCHEMAS(10235, "Tables have incompatible schemas and 
their partitions " +
             " cannot be exchanged."),
 
-  TRUNCATE_COLUMN_INDEXED_TABLE(10236, "Can not truncate columns from table 
with indexes"),
   TRUNCATE_COLUMN_NOT_RC(10237, "Only RCFileFormat supports column 
truncation."),
   TRUNCATE_COLUMN_ARCHIVED(10238, "Column truncation cannot be performed on 
archived partitions."),
   TRUNCATE_BUCKETED_COLUMN(10239,
@@ -426,7 +424,6 @@ public enum ErrorMsg {
       "Grouping sets aggregations (with rollups or cubes) are not allowed when 
" +
       "HIVEMULTIGROUPBYSINGLEREDUCER is turned on. Set 
hive.multigroupby.singlereducer=false if you want to use grouping sets"),
   CANNOT_RETRIEVE_TABLE_METADATA(10316, "Error while retrieving table 
metadata"),
-  CANNOT_DROP_INDEX(10317, "Error while dropping index"),
   INVALID_AST_TREE(10318, "Internal error : Invalid AST"),
   ERROR_SERIALIZE_METASTORE(10319, "Error while serializing the metastore 
objects"),
   IO_ERROR(10320, "Error while performing IO operation "),

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 227f6ae..802349f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -51,7 +51,6 @@ import java.util.TreeSet;
 import java.util.concurrent.ExecutionException;
 
 import com.google.common.collect.ImmutableSet;
-import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -61,7 +60,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.Constants;
@@ -76,7 +74,6 @@ import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CompactionResponse;
@@ -85,7 +82,6 @@ import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -108,7 +104,6 @@ import 
org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.TxnInfo;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
 import org.apache.hadoop.hive.metastore.api.WMTrigger;
 import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
@@ -150,7 +145,6 @@ import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
 import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;
 import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -172,7 +166,6 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
 import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
 import org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc;
 import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc;
@@ -184,7 +177,6 @@ import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc;
 import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc;
 import org.apache.hadoop.hive.ql.plan.ColStatistics;
 import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
 import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc;
 import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc;
 import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc;
@@ -198,7 +190,6 @@ import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
 import org.apache.hadoop.hive.ql.plan.DescTableDesc;
 import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.DropIndexDesc;
 import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc;
 import org.apache.hadoop.hive.ql.plan.DropTableDesc;
 import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc;
@@ -232,7 +223,6 @@ import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowGrantDesc;
-import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowLocksDesc;
 import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc;
@@ -394,21 +384,6 @@ public class DDLTask extends Task<DDLWork> implements 
Serializable {
         return createTable(db, crtTbl);
       }
 
-      CreateIndexDesc crtIndex = work.getCreateIndexDesc();
-      if (crtIndex != null) {
-        return createIndex(db, crtIndex);
-      }
-
-      AlterIndexDesc alterIndex = work.getAlterIndexDesc();
-      if (alterIndex != null) {
-        return alterIndex(db, alterIndex);
-      }
-
-      DropIndexDesc dropIdx = work.getDropIdxDesc();
-      if (dropIdx != null) {
-        return dropIndex(db, dropIdx);
-      }
-
       CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
       if (crtTblLike != null) {
         return createTableLike(db, crtTblLike);
@@ -589,11 +564,6 @@ public class DDLTask extends Task<DDLWork> implements 
Serializable {
         return grantOrRevokeRole(db, grantOrRevokeRoleDDL);
       }
 
-      ShowIndexesDesc showIndexes = work.getShowIndexesDesc();
-      if (showIndexes != null) {
-        return showIndexes(db, showIndexes);
-      }
-
       AlterTablePartMergeFilesDesc mergeFilesDesc = work.getMergeFilesDesc();
       if (mergeFilesDesc != null) {
         return mergeFiles(db, mergeFilesDesc, driverContext);
@@ -743,8 +713,12 @@ public class DDLTask extends Task<DDLWork> implements 
Serializable {
     if (!mustHaveAppliedChange && !desc.isReplace()) {
       return 0; // The modification cannot affect an active plan.
     }
-    if (appliedRp == null && !mustHaveAppliedChange) return 0; // Replacing an 
inactive plan.
-    if (wm == null && isInTest) return 0; // Skip for tests if WM is not 
present.
+    if (appliedRp == null && !mustHaveAppliedChange) {
+      return 0; // Replacing an inactive plan.
+    }
+    if (wm == null && isInTest) {
+      return 0; // Skip for tests if WM is not present.
+    }
 
     if ((appliedRp == null) != desc.isForceDeactivate()) {
       throw new HiveException("Cannot get a resource plan to apply; or 
non-null plan on disable");
@@ -1250,134 +1224,6 @@ public class DDLTask extends Task<DDLWork> implements 
Serializable {
     return 0;
   }
 
-  private int dropIndex(Hive db, DropIndexDesc dropIdx) throws HiveException {
-
-    if (HiveConf.getVar(conf, 
HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
-      throw new UnsupportedOperationException("Indexes unsupported for Tez 
execution engine");
-    }
-
-    db.dropIndex(dropIdx.getTableName(), dropIdx.getIndexName(), 
dropIdx.isThrowException(), true);
-    return 0;
-  }
-
-  private int createIndex(Hive db, CreateIndexDesc crtIndex) throws 
HiveException {
-
-    if (HiveConf.getVar(conf, 
HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
-      throw new UnsupportedOperationException("Indexes unsupported for Tez 
execution engine");
-    }
-
-    if( crtIndex.getSerde() != null) {
-      validateSerDe(crtIndex.getSerde());
-    }
-
-    String indexTableName = crtIndex.getIndexTableName();
-    // If location is specified - ensure that it is a full qualified name
-    makeLocationQualified(crtIndex, indexTableName);
-
-    db
-    .createIndex(
-        crtIndex.getTableName(), crtIndex.getIndexName(), 
crtIndex.getIndexTypeHandlerClass(),
-        crtIndex.getIndexedCols(), crtIndex.getIndexTableName(), 
crtIndex.getDeferredRebuild(),
-        crtIndex.getInputFormat(), crtIndex.getOutputFormat(), 
crtIndex.getSerde(),
-        crtIndex.getStorageHandler(), crtIndex.getLocation(), 
crtIndex.getIdxProps(), crtIndex.getTblProps(),
-        crtIndex.getSerdeProps(), crtIndex.getCollItemDelim(), 
crtIndex.getFieldDelim(), crtIndex.getFieldEscape(),
-        crtIndex.getLineDelim(), crtIndex.getMapKeyDelim(), 
crtIndex.getIndexComment()
-        );
-    if (HiveUtils.getIndexHandler(conf, 
crtIndex.getIndexTypeHandlerClass()).usesIndexTable()) {
-          Table indexTable = db.getTable(indexTableName);
-          addIfAbsentByName(new WriteEntity(indexTable, 
WriteEntity.WriteType.DDL_NO_LOCK));
-    }
-    return 0;
-  }
-
-  private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws 
HiveException {
-
-    if (HiveConf.getVar(conf, 
HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
-      throw new UnsupportedOperationException("Indexes unsupported for Tez 
execution engine");
-    }
-
-    String baseTableName = alterIndex.getBaseTableName();
-    String indexName = alterIndex.getIndexName();
-    Index idx = db.getIndex(baseTableName, indexName);
-
-    switch(alterIndex.getOp()) {
-    case ADDPROPS:
-      idx.getParameters().putAll(alterIndex.getProps());
-      break;
-    case UPDATETIMESTAMP:
-      try {
-        Map<String, String> props = new HashMap<String, String>();
-        Map<Map<String, String>, Long> basePartTs = new HashMap<Map<String, 
String>, Long>();
-
-        Table baseTbl = db.getTable(baseTableName);
-
-        if (baseTbl.isPartitioned()) {
-          List<Partition> baseParts;
-          if (alterIndex.getSpec() != null) {
-            baseParts = db.getPartitions(baseTbl, alterIndex.getSpec());
-          } else {
-            baseParts = db.getPartitions(baseTbl);
-          }
-          if (baseParts != null) {
-            for (Partition p : baseParts) {
-              Path dataLocation = p.getDataLocation();
-              FileSystem fs = dataLocation.getFileSystem(db.getConf());
-              FileStatus fss = fs.getFileStatus(dataLocation);
-              long lastModificationTime = fss.getModificationTime();
-
-              FileStatus[] parts = fs.listStatus(dataLocation, 
FileUtils.HIDDEN_FILES_PATH_FILTER);
-              if (parts != null && parts.length > 0) {
-                for (FileStatus status : parts) {
-                  if (status.getModificationTime() > lastModificationTime) {
-                    lastModificationTime = status.getModificationTime();
-                  }
-                }
-              }
-              basePartTs.put(p.getSpec(), lastModificationTime);
-            }
-          }
-        } else {
-          FileSystem fs = baseTbl.getPath().getFileSystem(db.getConf());
-          FileStatus fss = fs.getFileStatus(baseTbl.getPath());
-          basePartTs.put(null, fss.getModificationTime());
-        }
-        for (Map<String, String> spec : basePartTs.keySet()) {
-          if (spec != null) {
-            props.put(spec.toString(), basePartTs.get(spec).toString());
-          } else {
-            props.put("base_timestamp", basePartTs.get(null).toString());
-          }
-        }
-        idx.getParameters().putAll(props);
-      } catch (HiveException e) {
-        throw new HiveException("ERROR: Failed to update index timestamps");
-      } catch (IOException e) {
-        throw new HiveException("ERROR: Failed to look up timestamps on 
filesystem");
-      }
-
-      break;
-    default:
-      console.printError("Unsupported Alter command");
-      return 1;
-    }
-
-    // set last modified by properties
-    if (!updateModifiedParameters(idx.getParameters(), conf)) {
-      return 1;
-    }
-
-    try {
-      db.alterIndex(baseTableName, indexName, idx);
-    } catch (InvalidOperationException e) {
-      console.printError("Invalid alter operation: " + e.getMessage());
-      LOG.info("alter index: ", e);
-      return 1;
-    } catch (HiveException e) {
-      console.printError("Invalid alter operation: " + e.getMessage());
-      return 1;
-    }
-    return 0;
-  }
 
   /**
    * Alters a materialized view.
@@ -2782,57 +2628,6 @@ public class DDLTask extends Task<DDLWork> implements 
Serializable {
     return builder;
   }
 
-  /**
-   * Write a list of indexes to a file.
-   *
-   * @param db
-   *          The database in question.
-   * @param showIndexes
-   *          These are the indexes we're interested in.
-   * @return Returns 0 when execution succeeds and above 0 if it fails.
-   * @throws HiveException
-   *           Throws this exception if an unexpected error occurs.
-   */
-  private int showIndexes(Hive db, ShowIndexesDesc showIndexes) throws 
HiveException {
-    // get the indexes for the table and populate the output
-    String tableName = showIndexes.getTableName();
-    Table tbl = null;
-    List<Index> indexes = null;
-
-    tbl = db.getTable(tableName);
-
-    indexes = db.getIndexes(tbl.getDbName(), tbl.getTableName(), (short) -1);
-
-    // In case the query is served by HiveServer2, don't pad it with spaces,
-    // as HiveServer2 output is consumed by JDBC/ODBC clients.
-    boolean isOutputPadded = !SessionState.get().isHiveServerQuery();
-
-    // write the results in the file
-    DataOutputStream outStream = getOutputStream(showIndexes.getResFile());
-    try {
-      if (showIndexes.isFormatted()) {
-        // column headers
-        
outStream.write(MetaDataFormatUtils.getIndexColumnsHeader().getBytes(StandardCharsets.UTF_8));
-      }
-
-      for (Index index : indexes)
-      {
-        outStream.write(MetaDataFormatUtils.getIndexInformation(index, 
isOutputPadded).getBytes(StandardCharsets.UTF_8));
-      }
-    } catch (FileNotFoundException e) {
-      LOG.info("show indexes: ", e);
-      throw new HiveException(e.toString());
-    } catch (IOException e) {
-      LOG.info("show indexes: ", e);
-      throw new HiveException(e.toString());
-    } catch (Exception e) {
-      throw new HiveException(e.toString());
-    } finally {
-      IOUtils.closeStream(outStream);
-    }
-
-    return 0;
-  }
 
   /**
    * Write a list of the available databases to a file.
@@ -5294,37 +5089,6 @@ public class DDLTask extends Task<DDLWork> implements 
Serializable {
   }
 
    /**
-   * Make qualified location for an index .
-   *
-   * @param crtIndex
-   *          Create index descriptor.
-   * @param name
-   *          Object name.
-   */
-  private void makeLocationQualified(CreateIndexDesc crtIndex, String name) 
throws HiveException
-  {
-    Path path = null;
-    if (crtIndex.getLocation() == null) {
-      // Location is not set, leave it as-is if index doesn't belong to 
default DB
-      // Currently all indexes are created in current DB only
-      if 
(Utilities.getDatabaseName(name).equalsIgnoreCase(Warehouse.DEFAULT_DATABASE_NAME))
 {
-        // Default database name path is always ignored, use 
METASTOREWAREHOUSE and object name
-        // instead
-        String warehouse = HiveConf.getVar(conf, ConfVars.METASTOREWAREHOUSE);
-        String tableName = Utilities.getTableName(name);
-        path = new Path(warehouse, tableName.toLowerCase());
-      }
-    }
-    else {
-      path = new Path(crtIndex.getLocation());
-    }
-
-    if (path != null) {
-      crtIndex.setLocation(Utilities.getQualifiedPath(conf, path));
-    }
-  }
-
-   /**
    * Make qualified location for a database .
    *
    * @param database
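
Besides CREATE and DROP, the handlers removed from DDLTask above also backed
ALTER INDEX and SHOW INDEXES. The statement shapes that no longer have an
executor look roughly like the following (index/table names and the property
key are illustrative; the FORMATTED variant maps to the isFormatted() branch
deleted above):

    ALTER INDEX src_key_idx ON src SET IDXPROPERTIES ('comment'='stale');
    SHOW INDEXES ON src;
    SHOW FORMATTED INDEX ON src;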

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index f7801bb..32fc257 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -383,10 +383,6 @@ public final class FunctionRegistry {
     system.registerGenericUDF("between", GenericUDFBetween.class);
     system.registerGenericUDF("in_bloom_filter", 
GenericUDFInBloomFilter.class);
 
-    system.registerGenericUDF("ewah_bitmap_and", 
GenericUDFEWAHBitmapAnd.class);
-    system.registerGenericUDF("ewah_bitmap_or", GenericUDFEWAHBitmapOr.class);
-    system.registerGenericUDF("ewah_bitmap_empty", 
GenericUDFEWAHBitmapEmpty.class);
-
     // Utility UDFs
     system.registerUDF("version", UDFVersion.class, false);
 
@@ -447,8 +443,6 @@ public final class FunctionRegistry {
     system.registerGenericUDAF("ngrams", new GenericUDAFnGrams());
     system.registerGenericUDAF("context_ngrams", new 
GenericUDAFContextNGrams());
 
-    system.registerGenericUDAF("ewah_bitmap", new GenericUDAFEWAHBitmap());
-
     system.registerGenericUDAF("compute_stats", new GenericUDAFComputeStats());
     system.registerGenericUDAF("bloom_filter", new GenericUDAFBloomFilter());
     system.registerUDAF("percentile", UDAFPercentile.class);
@@ -1661,7 +1655,9 @@ public final class FunctionRegistry {
 
   public static boolean isPermanentFunction(ExprNodeGenericFuncDesc fnExpr) {
     GenericUDF udf = fnExpr.getGenericUDF();
-    if (udf == null) return false;
+    if (udf == null) {
+      return false;
+    }
 
     Class<?> clazz = udf.getClass();
     if (udf instanceof GenericUDFBridge) {
@@ -1787,7 +1783,9 @@ public final class FunctionRegistry {
    */
   public static boolean isBuiltInFuncExpr(ExprNodeGenericFuncDesc fnExpr) {
     GenericUDF udf = fnExpr.getGenericUDF();
-    if (udf == null) return false;
+    if (udf == null) {
+      return false;
+    }
 
     Class clazz = udf.getClass();
     if (udf instanceof GenericUDFBridge) {

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
index 5d2c759..c2959d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
@@ -197,33 +197,6 @@ public class OperatorUtils {
     return lastOp;
   }
 
-  /**
-   * Starting at the input operator, finds the last operator upstream that is
-   * an instance of the input class.
-   *
-   * @param op the starting operator
-   * @param clazz the class that the operator that we are looking for 
instantiates
-   * @return null if no such operator exists or multiple branches are found in
-   * the stream, the last operator otherwise
-   */
-  @SuppressWarnings("unchecked")
-  public static <T> T findLastOperatorUpstream(Operator<?> op, Class<T> clazz) 
{
-    Operator<?> currentOp = op;
-    T lastOp = null;
-    while (currentOp != null) {
-      if (clazz.isInstance(currentOp)) {
-        lastOp = (T) currentOp;
-      }
-      if (currentOp.getParentOperators().size() == 1) {
-        currentOp = currentOp.getParentOperators().get(0);
-      }
-      else {
-        currentOp = null;
-      }
-    }
-    return lastOp;
-  }
-
   public static void iterateParents(Operator<?> operator, 
Function<Operator<?>> function) {
     iterateParents(operator, function, new HashSet<Operator<?>>());
   }
@@ -240,10 +213,6 @@ public class OperatorUtils {
     }
   }
 
-  public static boolean sameRowSchema(Operator<?> operator1, Operator<?> 
operator2) {
-    return operator1.getSchema().equals(operator2.getSchema());
-  }
-
   /**
    * Given an operator and a set of classes, it classifies the operators it 
finds
    * in the stream depending on the classes they instantiate.

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
index 85cef86..83590e2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
@@ -33,8 +33,6 @@ import 
org.apache.hadoop.hive.ql.exec.repl.bootstrap.ReplLoadTask;
 import org.apache.hadoop.hive.ql.exec.repl.bootstrap.ReplLoadWork;
 import org.apache.hadoop.hive.ql.exec.spark.SparkTask;
 import org.apache.hadoop.hive.ql.exec.tez.TezTask;
-import org.apache.hadoop.hive.ql.index.IndexMetadataChangeTask;
-import org.apache.hadoop.hive.ql.index.IndexMetadataChangeWork;
 import org.apache.hadoop.hive.ql.io.merge.MergeFileTask;
 import org.apache.hadoop.hive.ql.io.merge.MergeFileWork;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
@@ -104,8 +102,6 @@ public final class TaskFactory {
         DependencyCollectionTask.class));
     taskvec.add(new TaskTuple<ImportCommitWork>(ImportCommitWork.class,
         ImportCommitTask.class));
-    taskvec.add(new 
TaskTuple<IndexMetadataChangeWork>(IndexMetadataChangeWork.class,
-        IndexMetadataChangeTask.class));
     taskvec.add(new TaskTuple<TezWork>(TezWork.class, TezTask.class));
     taskvec.add(new TaskTuple<SparkWork>(SparkWork.class, SparkTask.class));
     taskvec.add(new TaskTuple<>(ReplDumpWork.class, ReplDumpTask.class));

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 8f44c94..8248442 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -3567,11 +3567,6 @@ public final class Utilities {
     if (mWork.getInputformat() != null) {
       HiveConf.setVar(conf, var, mWork.getInputformat());
     }
-    if (mWork.getIndexIntermediateFile() != null) {
-      conf.set(ConfVars.HIVE_INDEX_COMPACT_FILE.varname, 
mWork.getIndexIntermediateFile());
-      conf.set(ConfVars.HIVE_INDEX_BLOCKFILTER_FILE.varname, 
mWork.getIndexIntermediateFile());
-    }
-
     // Intentionally overwrites anything the user may have put here
     conf.setBoolean("hive.input.format.sorted", mWork.isInputFormatSorted());
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index b436e80..e7fe4a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -574,11 +574,6 @@ public class ExecDriver extends Task<MapredWork> 
implements Serializable, Hadoop
     if (mWork.getInputformat() != null) {
       HiveConf.setVar(conf, ConfVars.HIVEINPUTFORMAT, mWork.getInputformat());
     }
-    if (mWork.getIndexIntermediateFile() != null) {
-      conf.set(ConfVars.HIVE_INDEX_COMPACT_FILE.varname, 
mWork.getIndexIntermediateFile());
-      conf.set(ConfVars.HIVE_INDEX_BLOCKFILTER_FILE.varname, 
mWork.getIndexIntermediateFile());
-    }
-
     // Intentionally overwrites anything the user may have put here
     conf.setBoolean("hive.input.format.sorted", mWork.isInputFormatSorted());
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/AbstractIndexHandler.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/index/AbstractIndexHandler.java 
b/ql/src/java/org/apache/hadoop/hive/ql/index/AbstractIndexHandler.java
deleted file mode 100644
index 3424600..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/AbstractIndexHandler.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-
-/**
- * Abstract base class for index handlers.  This is provided as insulation
- * so that as HiveIndexHandler evolves, default implementations of new
- * methods can be added here in order to avoid breaking existing
- * plugin implementations.
- */
-public abstract class AbstractIndexHandler implements HiveIndexHandler {
-  
-  public static String getColumnNames(List<FieldSchema> fieldSchemas) {
-    StringBuilder sb = new StringBuilder();
-    for (int i = 0; i < fieldSchemas.size(); i++) {
-      if (i > 0) {
-        sb.append(",");
-      }
-      sb.append(HiveUtils.unparseIdentifier(fieldSchemas.get(i).getName()));
-    }
-    return sb.toString();
-  }
-
-  public void generateIndexQuery(Index index, ExprNodeDesc predicate,
-    ParseContext pctx, HiveIndexQueryContext queryContext) {
-    queryContext.setQueryTasks(null);
-    return;
-  }
-
-  public boolean checkQuerySize(long inputSize, HiveConf conf) {
-    return false;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java 
b/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
deleted file mode 100644
index fb77096..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index;
-
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
-import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
-import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
-import org.apache.hadoop.hive.ql.plan.PartitionDesc;
-import org.apache.hadoop.hive.ql.session.LineageState;
-
-/**
- * Index handler for indexes that have aggregate functions on indexed columns.
- *
- */
-public class AggregateIndexHandler extends CompactIndexHandler {
-
-    @Override
-    public void analyzeIndexDefinition(Table baseTable, Index index,
-        Table indexTable) throws HiveException {
-      StorageDescriptor storageDesc = index.getSd();
-      if (this.usesIndexTable() && indexTable != null) {
-        StorageDescriptor indexTableSd = storageDesc.deepCopy();
-        List<FieldSchema> indexTblCols = indexTableSd.getCols();
-        FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", "");
-        indexTblCols.add(bucketFileName);
-        FieldSchema offSets = new FieldSchema("_offsets", "array<bigint>", "");
-        indexTblCols.add(offSets);
-        Map<String, String> paraList = index.getParameters();
-
-        if(paraList != null && paraList.containsKey("AGGREGATES")){
-          String propValue = paraList.get("AGGREGATES");
-          if(propValue.contains(",")){
-            String[] aggFuncs = propValue.split(",");
-            for (int i = 0; i < aggFuncs.length; i++) {
-              createAggregationFunction(indexTblCols, aggFuncs[i]);
-            }
-          }else{
-            createAggregationFunction(indexTblCols, propValue);
-         }
-        }
-        indexTable.setSd(indexTableSd);
-      }
-    }
-
-    private void createAggregationFunction(List<FieldSchema> indexTblCols, String property){
-      String[] aggFuncCol = property.split("\\(");
-      String funcName = aggFuncCol[0];
-      String colName = aggFuncCol[1].substring(0, aggFuncCol[1].length() - 1);
-      if(colName.contains("*")){
-        colName = colName.replace("*", "all");
-      }
-      FieldSchema aggregationFunction =
-        new FieldSchema("_" + funcName + "_of_" + colName + "", "bigint", "");
-      indexTblCols.add(aggregationFunction);
-    }
-
-    @Override
-    protected Task<?> getIndexBuilderMapRedTask(Set<ReadEntity> inputs,
-        Set<WriteEntity> outputs,
-        Index index, boolean partitioned,
-        PartitionDesc indexTblPartDesc, String indexTableName,
-        PartitionDesc baseTablePartDesc, String baseTableName, String dbName,
-        LineageState lineageState) {
-
-      List<FieldSchema> indexField = index.getSd().getCols();
-      String indexCols = HiveUtils.getUnparsedColumnNamesFromFieldSchema(indexField);
-
-      //form a new insert overwrite query.
-      StringBuilder command= new StringBuilder();
-      Map<String, String> partSpec = indexTblPartDesc.getPartSpec();
-
-      command.append("INSERT OVERWRITE TABLE " + HiveUtils.unparseIdentifier(indexTableName));
-      if (partitioned && indexTblPartDesc != null) {
-        command.append(" PARTITION ( ");
-        List<String> ret = getPartKVPairStringArray((LinkedHashMap<String, String>) partSpec);
-        for (int i = 0; i < ret.size(); i++) {
-          String partKV = ret.get(i);
-          command.append(partKV);
-          if (i < ret.size() - 1) {
-            command.append(",");
-          }
-        }
-        command.append(" ) ");
-      }
-
-      command.append(" SELECT ");
-      command.append(indexCols);
-      command.append(",");
-
-      command.append(VirtualColumn.FILENAME.getName());
-      command.append(",");
-      command.append(" collect_set (");
-      command.append(VirtualColumn.BLOCKOFFSET.getName());
-      command.append(") ");
-      command.append(",");
-
-      assert indexField.size()==1;
-
-      Map<String, String> paraList = index.getParameters();
-      if(paraList != null && paraList.containsKey("AGGREGATES")){
-          command.append(paraList.get("AGGREGATES") + " ");
-      }
-
-      command.append(" FROM " + HiveUtils.unparseIdentifier(baseTableName));
-      Map<String, String> basePartSpec = baseTablePartDesc.getPartSpec();
-      if(basePartSpec != null) {
-        command.append(" WHERE ");
-        List<String> pkv = getPartKVPairStringArray((LinkedHashMap<String, String>) basePartSpec);
-        for (int i = 0; i < pkv.size(); i++) {
-          String partKV = pkv.get(i);
-          command.append(partKV);
-          if (i < pkv.size() - 1) {
-            command.append(" AND ");
-          }
-        }
-      }
-      command.append(" GROUP BY ");
-      command.append(indexCols + ", " + VirtualColumn.FILENAME.getName());
-
-      HiveConf builderConf = new HiveConf(getConf(), AggregateIndexHandler.class);
-      builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPFILES, false);
-      builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPREDFILES, false);
-      builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGETEZFILES, false);
-      Task<?> rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs,
-          command, (LinkedHashMap<String, String>) partSpec, indexTableName, dbName, lineageState);
-      return rootTask;
-    }
-  }

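Note on the file above: the removed getIndexBuilderMapRedTask() rebuilt an aggregate index by assembling an INSERT OVERWRITE statement over the base table. A rough, self-contained sketch of the string it produced, for a hypothetical unpartitioned index table default__t_agg_idx__ on a base table t with the index property AGGREGATES=count(value); the table and column names are made up for illustration, and INPUT__FILE__NAME / BLOCK__OFFSET__INSIDE__FILE stand in for VirtualColumn.FILENAME / VirtualColumn.BLOCKOFFSET:

    public class AggregateIndexQuerySketch {
      public static void main(String[] args) {
        String indexTable = "default__t_agg_idx__";   // hypothetical index table name
        String baseTable  = "t";                      // hypothetical base table name
        String indexCols  = "`key`";                  // indexed column(s), already back-quoted
        String aggregates = "count(value)";           // value of the AGGREGATES index property

        // Same shape as the removed handler's query: the indexed columns, the
        // source file name, the set of block offsets per file, and the requested
        // aggregate, grouped by the indexed columns and the file name.
        String command =
            "INSERT OVERWRITE TABLE `" + indexTable + "`"
            + " SELECT " + indexCols + ", INPUT__FILE__NAME,"
            + " collect_set(BLOCK__OFFSET__INSIDE__FILE), " + aggregates
            + " FROM `" + baseTable + "`"
            + " GROUP BY " + indexCols + ", INPUT__FILE__NAME";

        System.out.println(command);
      }
    }
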
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java
deleted file mode 100644
index 30ae484..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.index;
-
-import org.apache.hadoop.hive.ql.index.bitmap.BitmapIndexHandler;
-import org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Holds index related constants
- */
-public class HiveIndex {
-  public static final Logger l4j = LoggerFactory.getLogger("HiveIndex");
-  public static final String INDEX_TABLE_CREATETIME = "hive.index.basetbl.dfs.lastModifiedTime";
-
-  public static enum IndexType {
-    AGGREGATE_TABLE("aggregate",  AggregateIndexHandler.class.getName()),
-    COMPACT_SUMMARY_TABLE("compact", CompactIndexHandler.class.getName()),
-    BITMAP_TABLE("bitmap", BitmapIndexHandler.class.getName());
-
-    private IndexType(String indexType, String className) {
-      indexTypeName = indexType;
-      this.handlerClsName = className;
-    }
-
-    private final String indexTypeName;
-    private final String handlerClsName;
-
-    public String getName() {
-      return indexTypeName;
-    }
-
-    public String getHandlerClsName() {
-      return handlerClsName;
-    }
-  }
-
-  public static IndexType getIndexType(String name) {
-    IndexType[] types = IndexType.values();
-    for (IndexType type : types) {
-      if(type.getName().equals(name.toLowerCase())) {
-        return type;
-      }
-    }
-    return null;
-  }
-
-  public static IndexType getIndexTypeByClassName(String className) {
-    IndexType[] types = IndexType.values();
-    for (IndexType type : types) {
-      if(type.getHandlerClsName().equals(className)) {
-        return type;
-      }
-    }
-    return null;
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
deleted file mode 100644
index 8facd91..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.session.LineageState;
-
-/**
- * HiveIndexHandler defines a pluggable interface for adding new index handlers
- * to Hive.
- */
-public interface HiveIndexHandler extends Configurable {
-  /**
-   * Determines whether this handler implements indexes by creating an index
-   * table.
-   *
-   * @return true if index creation implies creation of an index table in Hive;
-   *         false if the index representation is not stored in a Hive table
-   */
-  boolean usesIndexTable();
-
-  /**
-   * Requests that the handler validate an index definition and fill in
-   * additional information about its stored representation.
-   *
-   * @param baseTable
-   *          the definition of the table being indexed
-   *
-   * @param index
-   *          the definition of the index being created
-   *
-   * @param indexTable
-   *          a partial definition of the index table to be used for storing the
-   *          index representation, or null if usesIndexTable() returns false;
-   *          the handler can augment the index's storage descriptor (e.g. with
-   *          information about input/output format) and/or the index table's
-   *          definition (typically with additional columns containing the index
-   *          representation, e.g. pointers into HDFS).
-   *
-   * @throws HiveException if the index definition is invalid with respect to
-   *         either the base table or the supplied index table definition
-   */
-  void analyzeIndexDefinition(
-      org.apache.hadoop.hive.metastore.api.Table baseTable,
-      org.apache.hadoop.hive.metastore.api.Index index,
-      org.apache.hadoop.hive.metastore.api.Table indexTable)
-      throws HiveException;
-
-  /**
-   * Requests that the handler generate a plan for building the index; the plan
-   * should read the base table and write out the index representation.
-   *
-   * @param baseTbl
-   *          the definition of the table being indexed
-   *
-   * @param index
-   *          the definition of the index
-   *
-   * @param baseTblPartitions
-   *          list of base table partitions, where each element mirrors the
-   *          corresponding element of indexTblPartitions
-   *
-   * @param indexTbl
-   *          the definition of the index table, or null if usesIndexTable()
-   *          returns false
-   *
-   * @param inputs
-   *          inputs for hooks, supplemental inputs going
-   *          along with the return value
-   *
-   * @param outputs
-   *          outputs for hooks, supplemental outputs going
-   *          along with the return value
-   *
-   * @param lineageState
-   *          tracks Lineage for the query
-   *
-   * @return list of tasks to be executed in parallel for building the index
-   *
-   * @throws HiveException if plan generation fails
-   */
-  List<Task<?>> generateIndexBuildTaskList(
-      org.apache.hadoop.hive.ql.metadata.Table baseTbl,
-      org.apache.hadoop.hive.metastore.api.Index index,
-      List<Partition> indexTblPartitions, List<Partition> baseTblPartitions,
-      org.apache.hadoop.hive.ql.metadata.Table indexTbl,
-      Set<ReadEntity> inputs, Set<WriteEntity> outputs,  LineageState lineageState)
-      throws HiveException;
-
-  /**
-   * Generate the list of tasks required to run an index optimized sub-query for the
-   * given predicate, using the given indexes. If multiple indexes are
-   * provided, it is up to the handler whether to use none, one, some or all of
-   * them. The supplied predicate may reference any of the columns from any of
-   * the indexes. If the handler decides to use more than one index, it is
-   * responsible for generating tasks to combine their search results
-   * (e.g. performing a JOIN on the result).
-   * @param indexes
-   * @param predicate
-   * @param pctx
-   * @param queryContext contains results, such as query tasks and input configuration
-   */
-  void generateIndexQuery(List<Index> indexes, ExprNodeDesc predicate,
-    ParseContext pctx, HiveIndexQueryContext queryContext);
-
-  /**
-   * Check the size of an input query to make sure it fits within the bounds
-   *
-   * @param inputSize size (in bytes) of the query in question
-   * @param conf
-   * @return true if query is within the bounds
-   */
-  boolean checkQuerySize(long inputSize, HiveConf conf);
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexQueryContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexQueryContext.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexQueryContext.java
deleted file mode 100644
index b736541..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexQueryContext.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.index;
-
-import java.io.Serializable;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-
-/**
- * Used to pass information between the IndexProcessor and the plugin
- * IndexHandler during query processing
- *
- */
-public class HiveIndexQueryContext {
-
-  private Set<ReadEntity> additionalSemanticInputs; // additional inputs to add to the parse context when
-                                                        // merging the index query tasks
-  private String indexInputFormat;        // input format to set on the TableScanOperator to activate indexing
-  private String indexIntermediateFile;   // name of intermediate file written by the index query for the
-                                          // TableScanOperator to use
-  private List<Task<? extends Serializable>> queryTasks;      // list of tasks that will execute the index query and write
-                                                              // results to a temporary file
-  private ExprNodeDesc residualPredicate; // predicate that could not be processed by an index handler
-                                          // and should be used on the base table scan (see HIVE-2115)
-  private Set<Partition> queryPartitions; // partitions accessed by the original query
-
-  public HiveIndexQueryContext() {
-    this.additionalSemanticInputs = null;
-    this.indexInputFormat = null;
-    this.indexIntermediateFile = null;
-    this.queryTasks = null;
-  }
-
-  public Set<ReadEntity> getAdditionalSemanticInputs() {
-    return additionalSemanticInputs;
-  }
-  public void addAdditionalSemanticInputs(Set<ReadEntity> additionalParseInputs) {
-    if (this.additionalSemanticInputs == null) {
-      this.additionalSemanticInputs = new LinkedHashSet<ReadEntity>();
-    }
-    this.additionalSemanticInputs.addAll(additionalParseInputs);
-  }
-
-  public String getIndexInputFormat() {
-    return indexInputFormat;
-  }
-  public void setIndexInputFormat(String indexInputFormat) {
-    this.indexInputFormat = indexInputFormat;
-  }
-
-  public String getIndexIntermediateFile() {
-    return indexIntermediateFile;
-  }
-  public void setIndexIntermediateFile(String indexIntermediateFile) {
-    this.indexIntermediateFile = indexIntermediateFile;
-  }
-
-  public List<Task<? extends Serializable>> getQueryTasks() {
-    return queryTasks;
-  }
-  public void setQueryTasks(List<Task<? extends Serializable>> indexQueryTasks) {
-    this.queryTasks = indexQueryTasks;
-  }
-
-  public void setResidualPredicate(ExprNodeDesc residualPredicate) {
-    this.residualPredicate = residualPredicate;
-  }
-
-  public ExprNodeDesc getResidualPredicate() {
-    return residualPredicate;
-  }
-
-  public Set<Partition> getQueryPartitions() {
-    return queryPartitions;
-  }
-
-  public void setQueryPartitions(Set<Partition> queryPartitions) {
-    this.queryPartitions = queryPartitions;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java
deleted file mode 100644
index 6697066..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.index;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;
-import org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.LineRecordReader.LineReader;
-
-/**
- * HiveIndexResult parses the input stream from an index query
- * to generate a list of file splits to query.
- */
-public class HiveIndexResult implements IndexResult {
-
-  public static final Logger l4j =
-    LoggerFactory.getLogger(HiveIndexResult.class.getSimpleName());
-
-  // IndexBucket
-  static class IBucket {
-    private String name = null;
-    private final SortedSet<Long> offsets = new TreeSet<Long>();
-
-    public IBucket(String n) {
-      name = n;
-    }
-
-    public void add(Long offset) {
-      offsets.add(offset);
-    }
-
-    public String getName() {
-      return name;
-    }
-
-    public SortedSet<Long> getOffsets() {
-      return offsets;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (obj.getClass() != this.getClass()) {
-        return false;
-      }
-      return (((IBucket) obj).name.compareToIgnoreCase(this.name) == 0);
-    }
-  }
-
-  JobConf job = null;
-  BytesRefWritable[] bytesRef = new BytesRefWritable[2];
-  boolean ignoreHdfsLoc = false;
-
-  public HiveIndexResult(List<String> indexFiles, JobConf conf) throws IOException,
-      HiveException {
-    job = conf;
-
-    bytesRef[0] = new BytesRefWritable();
-    bytesRef[1] = new BytesRefWritable();
-    ignoreHdfsLoc = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_INDEX_IGNORE_HDFS_LOC);
-
-    if (indexFiles != null && indexFiles.size() > 0) {
-      List<Path> paths = new ArrayList<Path>();
-      for (String indexFile : indexFiles) {
-        Path indexFilePath = new Path(indexFile);
-        FileSystem fs = indexFilePath.getFileSystem(conf);
-        FileStatus indexStat = fs.getFileStatus(indexFilePath);
-        if (indexStat.isDir()) {
-          FileStatus[] fss = fs.listStatus(indexFilePath, FileUtils.HIDDEN_FILES_PATH_FILTER);
-          for (FileStatus f : fss) {
-            paths.add(f.getPath());
-          }
-        } else {
-          paths.add(indexFilePath);
-        }
-      }
-
-      long maxEntriesToLoad = HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES);
-      if (maxEntriesToLoad < 0) {
-        maxEntriesToLoad=Long.MAX_VALUE;
-      }
-
-      long lineCounter = 0;
-      for (Path indexFinalPath : paths) {
-        FileSystem fs = indexFinalPath.getFileSystem(conf);
-        FSDataInputStream ifile = fs.open(indexFinalPath);
-        LineReader lr = new LineReader(ifile, conf);
-        try {
-          Text line = new Text();
-          while (lr.readLine(line) > 0) {
-            if (++lineCounter > maxEntriesToLoad) {
-              throw new HiveException("Number of compact index entries loaded during the query exceeded the maximum of " + maxEntriesToLoad
-                  + " set in " + HiveConf.ConfVars.HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES.varname);
-            }
-            add(line);
-          }
-        }
-        finally {
-          // this will close the input stream
-          lr.close();
-        }
-      }
-    }
-  }
-
-  Map<String, IBucket> buckets = new HashMap<String, IBucket>();
-
-  private void add(Text line) throws HiveException {
-    String l = line.toString();
-    byte[] bytes = l.getBytes();
-    int firstEnd = 0;
-    int i = 0;
-    for (int index = 0; index < bytes.length; index++) {
-      if (bytes[index] == LazySerDeParameters.DefaultSeparators[0]) {
-        i++;
-        firstEnd = index;
-      }
-    }
-    if (i > 1) {
-      throw new HiveException(
-          "Bad index file row (index file should only contain two columns: bucket_file_name and offset lists.) ."
-              + line.toString());
-    }
-    String bucketFileName = new String(bytes, 0, firstEnd);
-
-    if (ignoreHdfsLoc) {
-      Path tmpPath = new Path(bucketFileName);
-      bucketFileName = tmpPath.toUri().getPath();
-    }
-    IBucket bucket = buckets.get(bucketFileName);
-    if (bucket == null) {
-      bucket = new IBucket(bucketFileName);
-      buckets.put(bucketFileName, bucket);
-    }
-
-    int currentStart = firstEnd + 1;
-    int currentEnd = firstEnd + 1;
-    for (; currentEnd < bytes.length; currentEnd++) {
-      if (bytes[currentEnd] == LazySerDeParameters.DefaultSeparators[1]) {
-        String one_offset = new String(bytes, currentStart, currentEnd
-            - currentStart);
-        Long offset = Long.parseLong(one_offset);
-        bucket.getOffsets().add(offset);
-        currentStart = currentEnd + 1;
-      }
-    }
-    String one_offset = new String(bytes, currentStart, currentEnd
-        - currentStart);
-    bucket.getOffsets().add(Long.parseLong(one_offset));
-  }
-
-  @Override
-  public boolean contains(FileSplit split) throws HiveException {
-
-    if (buckets == null) {
-      return false;
-    }
-    String bucketName = split.getPath().toString();
-    IBucket bucket = buckets.get(bucketName);
-    if (bucket == null) {
-      bucketName = split.getPath().toUri().getPath();
-      bucket = buckets.get(bucketName);
-      if (bucket == null) {
-        return false;
-      }
-    }
-
-    for (Long offset : bucket.getOffsets()) {
-      if ((offset >= split.getStart())
-          && (offset <= split.getStart() + split.getLength())) {
-        return true;
-      }
-    }
-    return false;
-  }
-}

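Note on the file above: the removed HiveIndexResult turned each row of the compact index output into a per-bucket-file set of block offsets. A small, self-contained sketch of that row format and parse, assuming the default LazySerDe separators '\001' (between the bucket file name and the offsets) and '\002' (between offsets); the class name and the sample path below are illustrative only:

    import java.util.Map;
    import java.util.SortedSet;
    import java.util.TreeMap;
    import java.util.TreeSet;

    public class IndexRowParseSketch {
      // Parses one index row of the form <bucket_file_path>\001<offset>\002<offset>...
      // and accumulates the offsets per bucket file, like the removed add(Text).
      static void addRow(String line, Map<String, SortedSet<Long>> buckets) {
        int sep = line.indexOf('\001');
        if (sep < 0 || line.indexOf('\001', sep + 1) >= 0) {
          throw new IllegalArgumentException("Bad index row: " + line);
        }
        String bucketFile = line.substring(0, sep);
        SortedSet<Long> offsets = buckets.computeIfAbsent(bucketFile, k -> new TreeSet<>());
        for (String offset : line.substring(sep + 1).split("\u0002")) {
          offsets.add(Long.parseLong(offset));
        }
      }

      public static void main(String[] args) {
        Map<String, SortedSet<Long>> buckets = new TreeMap<>();
        addRow("hdfs:///warehouse/t/000000_0\u0001" + "0\u0002" + "67108864", buckets);
        // prints: {hdfs:///warehouse/t/000000_0=[0, 67108864]}
        System.out.println(buckets);
      }
    }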