http://git-wip-us.apache.org/repos/asf/hive/blob/b8aa16ff/ql/src/test/results/clientpositive/llap/parquet_complex_types_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/parquet_complex_types_vectorization.q.out b/ql/src/test/results/clientpositive/llap/parquet_complex_types_vectorization.q.out
new file mode 100644
index 0000000..cedbb3d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/parquet_complex_types_vectorization.q.out
@@ -0,0 +1,926 @@
+PREHOOK: query: DROP TABLE parquet_complex_types_staging
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE parquet_complex_types_staging
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE parquet_complex_types
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE parquet_complex_types
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE parquet_complex_types_staging (
+id int,
+m1 map<string, varchar(5)>,
+l1 array<int>,
+st1 struct<c1:int, c2:string>,
+listIndex int
+) ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_complex_types_staging
+POSTHOOK: query: CREATE TABLE parquet_complex_types_staging (
+id int,
+m1 map<string, varchar(5)>,
+l1 array<int>,
+st1 struct<c1:int, c2:string>,
+listIndex int
+) ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_complex_types_staging
+PREHOOK: query: CREATE TABLE parquet_complex_types (
+id int,
+m1 map<string, varchar(5)>,
+l1 array<int>,
+st1 struct<c1:int, c2:string>,
+listIndex int
+) STORED AS PARQUET
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_complex_types
+POSTHOOK: query: CREATE TABLE parquet_complex_types (
+id int,
+m1 map<string, varchar(5)>,
+l1 array<int>,
+st1 struct<c1:int, c2:string>,
+listIndex int
+) STORED AS PARQUET
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_complex_types
+PREHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/parquet_complex_types.txt' OVERWRITE INTO TABLE 
parquet_complex_types_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@parquet_complex_types_staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH 
'../../data/files/parquet_complex_types.txt' OVERWRITE INTO TABLE 
parquet_complex_types_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@parquet_complex_types_staging
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_complex_types
+SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging where id 
< 1024
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types_staging
+PREHOOK: Output: default@parquet_complex_types
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_complex_types
+SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging where id 
< 1024
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types_staging
+POSTHOOK: Output: default@parquet_complex_types
+POSTHOOK: Lineage: parquet_complex_types.id SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:id,
 type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.l1 SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:l1,
 type:array<int>, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.listindex SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:listindex,
 type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.m1 SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:m1,
 type:map<string,varchar(5)>, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.st1 SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:st1,
 type:struct<c1:int,c2:string>, comment:null), ]
+PREHOOK: query: select count(*) from parquet_complex_types
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from parquet_complex_types
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+1023
+PREHOOK: query: explain vectorization expression select l1, l1[0], l1[1], 
l1[listIndex], listIndex from parquet_complex_types limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select l1, l1[0], l1[1], 
l1[listIndex], listIndex from parquet_complex_types limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: parquet_complex_types
+                  Statistics: Num rows: 1023 Data size: 120652 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                  Select Operator
+                    expressions: l1 (type: array<int>), l1[0] (type: int), 
l1[1] (type: int), l1[listindex] (type: int), listindex (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [2, 6, 7, 8, 4]
+                        selectExpressions: ListIndexColScalar(col 
2:array<int>, col 0:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 
1:int) -> 7:int, ListIndexColColumn(col 2:array<int>, col 4:int) -> 8:int
+                    Statistics: Num rows: 1023 Data size: 120652 Basic stats: 
COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 10
+                      Limit Vectorization:
+                          className: VectorLimitOperator
+                          native: true
+                      Statistics: Num rows: 10 Data size: 1170 Basic stats: 
COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
+                        Statistics: Num rows: 10 Data size: 1170 Basic stats: 
COMPLETE Column stats: NONE
+                        table:
+                            input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs (cache only)
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select l1, l1[0], l1[1], l1[listIndex], listIndex from 
parquet_complex_types limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select l1, l1[0], l1[1], l1[listIndex], listIndex from 
parquet_complex_types limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+[100,101]      100     101     100     0
+[102,103]      102     103     103     1
+[104,105]      104     105     104     0
+[106,107]      106     107     107     1
+[108,109]      108     109     108     0
+[110,111]      110     111     111     1
+[112,113]      112     113     112     0
+[114,115]      114     115     115     1
+[116,117]      116     117     116     0
+[118,119]      118     119     119     1
+PREHOOK: query: explain vectorization expression select sum(l1[0]), l1[1] from 
parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select sum(l1[0]), l1[1] 
from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] 
limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: parquet_complex_types
+                  Statistics: Num rows: 1023 Data size: 116760 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                  Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: 
FilterLongColGreaterLongScalar(col 6:int, val 1000)(children: 
ListIndexColScalar(col 2:array<int>, col 0:int) -> 6:int)
+                    predicate: (l1[0] > 1000) (type: boolean)
+                    Statistics: Num rows: 341 Data size: 38920 Basic stats: 
COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: l1[1] (type: int), l1[0] (type: int)
+                      outputColumnNames: _col0, _col1
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumnNums: [6, 7]
+                          selectExpressions: ListIndexColScalar(col 
2:array<int>, col 1:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 
0:int) -> 7:int
+                      Statistics: Num rows: 341 Data size: 38920 Basic stats: 
COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: sum(_col1)
+                        Group By Vectorization:
+                            aggregators: VectorUDAFSumLong(col 7:int) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 6:int
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0]
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 341 Data size: 38920 Basic 
stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkLongOperator
+                              native: true
+                              nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, 
BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          Statistics: Num rows: 341 Data size: 38920 Basic 
stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs (cache only)
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                Group By Vectorization:
+                    aggregators: VectorUDAFSumLong(col 1:bigint) -> bigint
+                    className: VectorGroupByOperator
+                    groupByMode: MERGEPARTIAL
+                    keyExpressions: col 0:int
+                    native: false
+                    vectorProcessingMode: MERGE_PARTIAL
+                    projectedOutputColumnNums: [0]
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 170 Data size: 19402 Basic stats: 
COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: bigint), _col0 (type: int)
+                  outputColumnNames: _col0, _col2
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumnNums: [1, 0]
+                  Statistics: Num rows: 170 Data size: 19402 Basic stats: 
COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col2 (type: int)
+                    sort order: +
+                    Reduce Sink Vectorization:
+                        className: VectorReduceSinkObjectHashOperator
+                        native: true
+                        nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, 
BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    Statistics: Num rows: 170 Data size: 19402 Basic stats: 
COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: bigint), KEY.reducesinkkey0 
(type: int)
+                outputColumnNames: _col0, _col1
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0]
+                Statistics: Num rows: 170 Data size: 19402 Basic stats: 
COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 10
+                  Limit Vectorization:
+                      className: VectorLimitOperator
+                      native: true
+                  Statistics: Num rows: 10 Data size: 1140 Basic stats: 
COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
+                    Statistics: Num rows: 10 Data size: 1140 Basic stats: 
COMPLETE Column stats: NONE
+                    table:
+                        input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(l1[0]), l1[1] from parquet_complex_types where 
l1[0] > 1000 group by l1[1] order by l1[1] desc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(l1[0]), l1[1] from parquet_complex_types where 
l1[0] > 1000 group by l1[1] order by l1[1] desc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+2144   2145
+2142   2143
+2140   2141
+2138   2139
+2136   2137
+2134   2135
+2132   2133
+2130   2131
+2128   2129
+2126   2127
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_complex_types
+SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging where id 
< 1025
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types_staging
+PREHOOK: Output: default@parquet_complex_types
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_complex_types
+SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging where id 
< 1025
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types_staging
+POSTHOOK: Output: default@parquet_complex_types
+POSTHOOK: Lineage: parquet_complex_types.id SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:id,
 type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.l1 SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:l1,
 type:array<int>, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.listindex SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:listindex,
 type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.m1 SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:m1,
 type:map<string,varchar(5)>, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.st1 SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:st1,
 type:struct<c1:int,c2:string>, comment:null), ]
+PREHOOK: query: select count(*) from parquet_complex_types
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from parquet_complex_types
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+1024
+PREHOOK: query: explain vectorization expression select l1, l1[0], l1[1], 
l1[listIndex], listIndex from parquet_complex_types limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select l1, l1[0], l1[1], 
l1[listIndex], listIndex from parquet_complex_types limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: parquet_complex_types
+                  Statistics: Num rows: 1024 Data size: 120776 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                  Select Operator
+                    expressions: l1 (type: array<int>), l1[0] (type: int), 
l1[1] (type: int), l1[listindex] (type: int), listindex (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [2, 6, 7, 8, 4]
+                        selectExpressions: ListIndexColScalar(col 
2:array<int>, col 0:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 
1:int) -> 7:int, ListIndexColColumn(col 2:array<int>, col 4:int) -> 8:int
+                    Statistics: Num rows: 1024 Data size: 120776 Basic stats: 
COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 10
+                      Limit Vectorization:
+                          className: VectorLimitOperator
+                          native: true
+                      Statistics: Num rows: 10 Data size: 1170 Basic stats: 
COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
+                        Statistics: Num rows: 10 Data size: 1170 Basic stats: 
COMPLETE Column stats: NONE
+                        table:
+                            input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs (cache only)
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select l1, l1[0], l1[1], l1[listIndex], listIndex from 
parquet_complex_types limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select l1, l1[0], l1[1], l1[listIndex], listIndex from 
parquet_complex_types limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+[100,101]      100     101     100     0
+[102,103]      102     103     103     1
+[104,105]      104     105     104     0
+[106,107]      106     107     107     1
+[108,109]      108     109     108     0
+[110,111]      110     111     111     1
+[112,113]      112     113     112     0
+[114,115]      114     115     115     1
+[116,117]      116     117     116     0
+[118,119]      118     119     119     1
+PREHOOK: query: explain vectorization expression select sum(l1[0]), l1[1] from 
parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select sum(l1[0]), l1[1] 
from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] 
limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: parquet_complex_types
+                  Statistics: Num rows: 1024 Data size: 116880 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                  Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: 
FilterLongColGreaterLongScalar(col 6:int, val 1000)(children: 
ListIndexColScalar(col 2:array<int>, col 0:int) -> 6:int)
+                    predicate: (l1[0] > 1000) (type: boolean)
+                    Statistics: Num rows: 341 Data size: 38921 Basic stats: 
COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: l1[1] (type: int), l1[0] (type: int)
+                      outputColumnNames: _col0, _col1
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumnNums: [6, 7]
+                          selectExpressions: ListIndexColScalar(col 
2:array<int>, col 1:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 
0:int) -> 7:int
+                      Statistics: Num rows: 341 Data size: 38921 Basic stats: 
COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: sum(_col1)
+                        Group By Vectorization:
+                            aggregators: VectorUDAFSumLong(col 7:int) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 6:int
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0]
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 341 Data size: 38921 Basic 
stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkLongOperator
+                              native: true
+                              nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, 
BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          Statistics: Num rows: 341 Data size: 38921 Basic 
stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs (cache only)
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                Group By Vectorization:
+                    aggregators: VectorUDAFSumLong(col 1:bigint) -> bigint
+                    className: VectorGroupByOperator
+                    groupByMode: MERGEPARTIAL
+                    keyExpressions: col 0:int
+                    native: false
+                    vectorProcessingMode: MERGE_PARTIAL
+                    projectedOutputColumnNums: [0]
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 170 Data size: 19403 Basic stats: 
COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: bigint), _col0 (type: int)
+                  outputColumnNames: _col0, _col2
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumnNums: [1, 0]
+                  Statistics: Num rows: 170 Data size: 19403 Basic stats: 
COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col2 (type: int)
+                    sort order: +
+                    Reduce Sink Vectorization:
+                        className: VectorReduceSinkObjectHashOperator
+                        native: true
+                        nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, 
BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    Statistics: Num rows: 170 Data size: 19403 Basic stats: 
COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: bigint), KEY.reducesinkkey0 
(type: int)
+                outputColumnNames: _col0, _col1
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0]
+                Statistics: Num rows: 170 Data size: 19403 Basic stats: 
COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 10
+                  Limit Vectorization:
+                      className: VectorLimitOperator
+                      native: true
+                  Statistics: Num rows: 10 Data size: 1140 Basic stats: 
COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
+                    Statistics: Num rows: 10 Data size: 1140 Basic stats: 
COMPLETE Column stats: NONE
+                    table:
+                        input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(l1[0]), l1[1] from parquet_complex_types where 
l1[0] > 1000 group by l1[1] order by l1[1] desc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(l1[0]), l1[1] from parquet_complex_types where 
l1[0] > 1000 group by l1[1] order by l1[1] desc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+2146   2147
+2144   2145
+2142   2143
+2140   2141
+2138   2139
+2136   2137
+2134   2135
+2132   2133
+2130   2131
+2128   2129
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_complex_types
+SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types_staging
+PREHOOK: Output: default@parquet_complex_types
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_complex_types
+SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types_staging
+POSTHOOK: Output: default@parquet_complex_types
+POSTHOOK: Lineage: parquet_complex_types.id SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:id,
 type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.l1 SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:l1,
 type:array<int>, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.listindex SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:listindex,
 type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.m1 SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:m1,
 type:map<string,varchar(5)>, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.st1 SIMPLE 
[(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:st1,
 type:struct<c1:int,c2:string>, comment:null), ]
+PREHOOK: query: select count(*) from parquet_complex_types
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from parquet_complex_types
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+1025
+PREHOOK: query: explain vectorization expression select l1, l1[0], l1[1], 
l1[listIndex], listIndex from parquet_complex_types limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select l1, l1[0], l1[1], 
l1[listIndex], listIndex from parquet_complex_types limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: parquet_complex_types
+                  Statistics: Num rows: 1025 Data size: 120900 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                  Select Operator
+                    expressions: l1 (type: array<int>), l1[0] (type: int), 
l1[1] (type: int), l1[listindex] (type: int), listindex (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [2, 6, 7, 8, 4]
+                        selectExpressions: ListIndexColScalar(col 
2:array<int>, col 0:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 
1:int) -> 7:int, ListIndexColColumn(col 2:array<int>, col 4:int) -> 8:int
+                    Statistics: Num rows: 1025 Data size: 120900 Basic stats: 
COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 10
+                      Limit Vectorization:
+                          className: VectorLimitOperator
+                          native: true
+                      Statistics: Num rows: 10 Data size: 1170 Basic stats: 
COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
+                        Statistics: Num rows: 10 Data size: 1170 Basic stats: 
COMPLETE Column stats: NONE
+                        table:
+                            input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs (cache only)
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select l1, l1[0], l1[1], l1[listIndex], listIndex from 
parquet_complex_types limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select l1, l1[0], l1[1], l1[listIndex], listIndex from 
parquet_complex_types limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+[100,101]      100     101     100     0
+[102,103]      102     103     103     1
+[104,105]      104     105     104     0
+[106,107]      106     107     107     1
+[108,109]      108     109     108     0
+[110,111]      110     111     111     1
+[112,113]      112     113     112     0
+[114,115]      114     115     115     1
+[116,117]      116     117     116     0
+[118,119]      118     119     119     1
+PREHOOK: query: explain vectorization expression select sum(l1[0]), l1[1] from 
parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select sum(l1[0]), l1[1] 
from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] 
limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: parquet_complex_types
+                  Statistics: Num rows: 1025 Data size: 117000 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                  Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: 
FilterLongColGreaterLongScalar(col 6:int, val 1000)(children: 
ListIndexColScalar(col 2:array<int>, col 0:int) -> 6:int)
+                    predicate: (l1[0] > 1000) (type: boolean)
+                    Statistics: Num rows: 341 Data size: 38923 Basic stats: 
COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: l1[1] (type: int), l1[0] (type: int)
+                      outputColumnNames: _col0, _col1
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumnNums: [6, 7]
+                          selectExpressions: ListIndexColScalar(col 
2:array<int>, col 1:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 
0:int) -> 7:int
+                      Statistics: Num rows: 341 Data size: 38923 Basic stats: 
COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: sum(_col1)
+                        Group By Vectorization:
+                            aggregators: VectorUDAFSumLong(col 7:int) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 6:int
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0]
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 341 Data size: 38923 Basic 
stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkLongOperator
+                              native: true
+                              nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, 
BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          Statistics: Num rows: 341 Data size: 38923 Basic 
stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs (cache only)
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                Group By Vectorization:
+                    aggregators: VectorUDAFSumLong(col 1:bigint) -> bigint
+                    className: VectorGroupByOperator
+                    groupByMode: MERGEPARTIAL
+                    keyExpressions: col 0:int
+                    native: false
+                    vectorProcessingMode: MERGE_PARTIAL
+                    projectedOutputColumnNums: [0]
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 170 Data size: 19404 Basic stats: 
COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: bigint), _col0 (type: int)
+                  outputColumnNames: _col0, _col2
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumnNums: [1, 0]
+                  Statistics: Num rows: 170 Data size: 19404 Basic stats: 
COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col2 (type: int)
+                    sort order: +
+                    Reduce Sink Vectorization:
+                        className: VectorReduceSinkObjectHashOperator
+                        native: true
+                        nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, 
BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    Statistics: Num rows: 170 Data size: 19404 Basic stats: 
COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: bigint), KEY.reducesinkkey0 
(type: int)
+                outputColumnNames: _col0, _col1
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0]
+                Statistics: Num rows: 170 Data size: 19404 Basic stats: 
COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 10
+                  Limit Vectorization:
+                      className: VectorLimitOperator
+                      native: true
+                  Statistics: Num rows: 10 Data size: 1140 Basic stats: 
COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
+                    Statistics: Num rows: 10 Data size: 1140 Basic stats: 
COMPLETE Column stats: NONE
+                    table:
+                        input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(l1[0]), l1[1] from parquet_complex_types where 
l1[0] > 1000 group by l1[1] order by l1[1] desc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(l1[0]), l1[1] from parquet_complex_types where 
l1[0] > 1000 group by l1[1] order by l1[1] desc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+2148   2149
+2146   2147
+2144   2145
+2142   2143
+2140   2141
+2138   2139
+2136   2137
+2134   2135
+2132   2133
+2130   2131
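
For reference, a minimal standalone HiveQL sketch of the pattern the golden file above exercises. The queries are the ones shown in the q.out and assume the parquet_complex_types table created there; the SET statement is an assumption about the test setup (the plan only confirms that hive.vectorized.execution.enabled is true).

  SET hive.vectorized.execution.enabled=true;

  -- Constant indexes l1[0] and l1[1] vectorize as ListIndexColScalar;
  -- the column index l1[listIndex] vectorizes as ListIndexColColumn.
  SELECT l1, l1[0], l1[1], l1[listIndex], listIndex
  FROM parquet_complex_types
  LIMIT 10;

  -- Filtering, grouping, and ordering on array elements remain vectorized
  -- (VectorGroupByOperator, VectorReduceSinkLongOperator in the plan above).
  SELECT SUM(l1[0]), l1[1]
  FROM parquet_complex_types
  WHERE l1[0] > 1000
  GROUP BY l1[1]
  ORDER BY l1[1] DESC
  LIMIT 10;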

http://git-wip-us.apache.org/repos/asf/hive/blob/b8aa16ff/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out b/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out
index ee9e40a..37827b2 100644
--- a/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out
@@ -159,24 +159,25 @@ POSTHOOK: Input: default@test
 #### A masked pattern was here ####
 alltypesorc.ctinyint   alltypesorc.csmallint   alltypesorc.cint        
alltypesorc.cbigint     alltypesorc.cfloat      alltypesorc.cdouble     
alltypesorc.cstring1    alltypesorc.cstring2    alltypesorc.ctimestamp1 
alltypesorc.ctimestamp2 alltypesorc.cboolean1   alltypesorc.cboolean2   test.a  
test.b
 -51    NULL    199408978       -1800989684     -51.0   NULL    
34N4EY63M1GFWuW0boW     P4PL5h1eXR4mMLr2        1969-12-31 16:00:08.451 NULL    
false   true    199408978       {1:"val_1",2:"val_2"}
-PREHOOK: query: CREATE TABLE test2a (a ARRAY<INT>) STORED AS ORC
+PREHOOK: query: CREATE TABLE test2a (a ARRAY<INT>, index INT) STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@test2a
-POSTHOOK: query: CREATE TABLE test2a (a ARRAY<INT>) STORED AS ORC
+POSTHOOK: query: CREATE TABLE test2a (a ARRAY<INT>, index INT) STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@test2a
-PREHOOK: query: INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2) FROM src 
LIMIT 1
+PREHOOK: query: INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2), 1 FROM src 
LIMIT 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@test2a
-POSTHOOK: query: INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2) FROM src 
LIMIT 1
+POSTHOOK: query: INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2), 1 FROM src 
LIMIT 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@test2a
 POSTHOOK: Lineage: test2a.a EXPRESSION []
-_c0
+POSTHOOK: Lineage: test2a.index SIMPLE []
+_c0    _c1
 PREHOOK: query: CREATE TABLE test2b (a INT) STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -240,17 +241,17 @@ STAGE PLANS:
                           className: VectorMapJoinInnerLongOperator
                           native: true
                           nativeConditionsMet: 
hive.mapjoin.optimized.hashtable IS true, 
hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine 
tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS 
true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS 
true
-                      outputColumnNames: _col0, _col4
+                      outputColumnNames: _col0, _col4, _col5
                       input vertices:
                         1 Map 2
                       Statistics: Num rows: 3 Data size: 13 Basic stats: 
COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: _col0 (type: int), _col4 (type: 
array<int>)
-                        outputColumnNames: _col0, _col1
+                        expressions: _col0 (type: int), _col4 (type: 
array<int>), _col5 (type: int)
+                        outputColumnNames: _col0, _col1, _col2
                         Select Vectorization:
                             className: VectorSelectOperator
                             native: true
-                            projectedOutputColumnNums: [0, 2]
+                            projectedOutputColumnNums: [0, 2, 3]
                         Statistics: Num rows: 3 Data size: 13 Basic stats: 
COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
@@ -277,24 +278,38 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test2a
-                  Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE 
Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: SelectColumnIsNotNull(col 
3:int)(children: ListIndexColScalar(col 0:array<int>, col 1:int) -> 3:int)
                     predicate: a[1] is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 120 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 124 Basic stats: 
COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: a[1] (type: int)
                       sort order: +
                       Map-reduce partition columns: a[1] (type: int)
-                      Statistics: Num rows: 1 Data size: 120 Basic stats: 
COMPLETE Column stats: NONE
-                      value expressions: a (type: array<int>)
-            Execution mode: llap
+                      Reduce Sink Vectorization:
+                          className: VectorReduceSinkLongOperator
+                          keyExpressions: ListIndexColScalar(col 0:array<int>, 
col 1:int) -> 3:int
+                          native: true
+                          nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, 
BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      Statistics: Num rows: 1 Data size: 124 Basic stats: 
COMPLETE Column stats: NONE
+                      value expressions: a (type: array<int>), index (type: 
int)
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
                 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: FILTER operator: Unexpected hive type 
name array<int>
-                vectorized: false
+                allNative: true
+                usesVectorUDFAdaptor: false
+                vectorized: true
 
   Stage: Stage-0
     Fetch Operator
@@ -312,5 +327,140 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test2a
 POSTHOOK: Input: default@test2b
 #### A masked pattern was here ####
-test2b.a       test2a.a
-2      [1,2]
+test2b.a       test2a.a        test2a.index
+2      [1,2]   1
+PREHOOK: query: explain vectorization expression
+select *  from test2b join test2a on test2b.a = test2a.a[test2a.index]
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression
+select *  from test2b join test2a on test2b.a = test2a.a[test2a.index]
+POSTHOOK: type: QUERY
+Explain
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: test2b
+                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE 
Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                  Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: SelectColumnIsNotNull(col 0:int)
+                    predicate: a is not null (type: boolean)
+                    Statistics: Num rows: 3 Data size: 12 Basic stats: 
COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      keys:
+                        0 a (type: int)
+                        1 a[index] (type: int)
+                      Map Join Vectorization:
+                          className: VectorMapJoinInnerLongOperator
+                          native: true
+                          nativeConditionsMet: 
hive.mapjoin.optimized.hashtable IS true, 
hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine 
tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS 
true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS 
true
+                      outputColumnNames: _col0, _col4, _col5
+                      input vertices:
+                        1 Map 2
+                      Statistics: Num rows: 3 Data size: 13 Basic stats: 
COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col0 (type: int), _col4 (type: 
array<int>), _col5 (type: int)
+                        outputColumnNames: _col0, _col1, _col2
+                        Select Vectorization:
+                            className: VectorSelectOperator
+                            native: true
+                            projectedOutputColumnNums: [0, 2, 3]
+                        Statistics: Num rows: 3 Data size: 13 Basic stats: 
COMPLETE Column stats: NONE
+                        File Output Operator
+                          compressed: false
+                          File Sink Vectorization:
+                              className: VectorFileSinkOperator
+                              native: false
+                          Statistics: Num rows: 3 Data size: 13 Basic stats: 
COMPLETE Column stats: NONE
+                          table:
+                              input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                              output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                              serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: test2a
+                  Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE 
Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                  Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: SelectColumnIsNotNull(col 
3:int)(children: ListIndexColColumn(col 0:array<int>, col 1:int) -> 3:int)
+                    predicate: a[index] is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 124 Basic stats: 
COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: a[index] (type: int)
+                      sort order: +
+                      Map-reduce partition columns: a[index] (type: int)
+                      Reduce Sink Vectorization:
+                          className: VectorReduceSinkLongOperator
+                          keyExpressions: ListIndexColColumn(col 0:array<int>, 
col 1:int) -> 3:int
+                          native: true
+                          nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, 
BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      Statistics: Num rows: 1 Data size: 124 Basic stats: 
COMPLETE Column stats: NONE
+                      value expressions: a (type: array<int>), index (type: 
int)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: true
+                usesVectorUDFAdaptor: false
+                vectorized: true
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select *  from test2b join test2a on test2b.a = 
test2a.a[test2a.index]
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test2a
+PREHOOK: Input: default@test2b
+#### A masked pattern was here ####
+POSTHOOK: query: select *  from test2b join test2a on test2b.a = 
test2a.a[test2a.index]
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test2a
+POSTHOOK: Input: default@test2b
+#### A masked pattern was here ####
+test2b.a       test2a.a        test2a.index
+2      [1,2]   1
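
The corresponding standalone sketch for the vector_complex_join.q addition, with DDL and queries taken from the diff above; it assumes the src table available in the Hive test environment, and the SET statement is again an assumption about the test setup.

  SET hive.vectorized.execution.enabled=true;

  CREATE TABLE test2a (a ARRAY<INT>, index INT) STORED AS ORC;
  INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2), 1 FROM src LIMIT 1;
  CREATE TABLE test2b (a INT) STORED AS ORC;

  -- Join key a[1] (constant index) vectorizes as ListIndexColScalar.
  SELECT * FROM test2b JOIN test2a ON test2b.a = test2a.a[1];

  -- Join key a[index] (column index) vectorizes as ListIndexColColumn.
  SELECT * FROM test2b JOIN test2a ON test2b.a = test2a.a[test2a.index];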
