http://git-wip-us.apache.org/repos/asf/hive/blob/b8aa16ff/ql/src/test/results/clientpositive/parquet_complex_types_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_complex_types_vectorization.q.out b/ql/src/test/results/clientpositive/parquet_complex_types_vectorization.q.out
new file mode 100644
index 0000000..03488a1
--- /dev/null
+++ b/ql/src/test/results/clientpositive/parquet_complex_types_vectorization.q.out
@@ -0,0 +1,878 @@
+PREHOOK: query: DROP TABLE parquet_complex_types_staging
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE parquet_complex_types_staging
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE parquet_complex_types
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE parquet_complex_types
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE parquet_complex_types_staging (
+id int,
+m1 map<string, varchar(5)>,
+l1 array<int>,
+st1 struct<c1:int, c2:string>,
+listIndex int
+) ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_complex_types_staging
+POSTHOOK: query: CREATE TABLE parquet_complex_types_staging (
+id int,
+m1 map<string, varchar(5)>,
+l1 array<int>,
+st1 struct<c1:int, c2:string>,
+listIndex int
+) ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_complex_types_staging
+PREHOOK: query: CREATE TABLE parquet_complex_types (
+id int,
+m1 map<string, varchar(5)>,
+l1 array<int>,
+st1 struct<c1:int, c2:string>,
+listIndex int
+) STORED AS PARQUET
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_complex_types
+POSTHOOK: query: CREATE TABLE parquet_complex_types (
+id int,
+m1 map<string, varchar(5)>,
+l1 array<int>,
+st1 struct<c1:int, c2:string>,
+listIndex int
+) STORED AS PARQUET
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_complex_types
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_complex_types.txt' OVERWRITE INTO TABLE parquet_complex_types_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@parquet_complex_types_staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_complex_types.txt' OVERWRITE INTO TABLE parquet_complex_types_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@parquet_complex_types_staging
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_complex_types
+SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging where id < 1024
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types_staging
+PREHOOK: Output: default@parquet_complex_types
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_complex_types
+SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging where id < 1024
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types_staging
+POSTHOOK: Output: default@parquet_complex_types
+POSTHOOK: Lineage: parquet_complex_types.id SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.l1 SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:l1, type:array<int>, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.listindex SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:listindex, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.m1 SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:m1, type:map<string,varchar(5)>, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.st1 SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:st1, type:struct<c1:int,c2:string>, comment:null), ]
+PREHOOK: query: select count(*) from parquet_complex_types
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from parquet_complex_types
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+1023
+PREHOOK: query: explain vectorization expression select l1, l1[0], l1[1], l1[listIndex], listIndex from parquet_complex_types limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select l1, l1[0], l1[1], l1[listIndex], listIndex from parquet_complex_types limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: parquet_complex_types
+            Statistics: Num rows: 1023 Data size: 5115 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+            Select Operator
+              expressions: l1 (type: array<int>), l1[0] (type: int), l1[1] (type: int), l1[listindex] (type: int), listindex (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4
+              Select Vectorization:
+                  className: VectorSelectOperator
+                  native: true
+                  projectedOutputColumnNums: [2, 6, 7, 8, 4]
+                  selectExpressions: ListIndexColScalar(col 2:array<int>, col 0:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 1:int) -> 7:int, ListIndexColColumn(col 2:array<int>, col 4:int) -> 8:int
+              Statistics: Num rows: 1023 Data size: 5115 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 10
+                Limit Vectorization:
+                    className: VectorLimitOperator
+                    native: true
+                Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  File Sink Vectorization:
+                      className: VectorFileSinkOperator
+                      native: false
+                  Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select l1, l1[0], l1[1], l1[listIndex], listIndex from parquet_complex_types limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select l1, l1[0], l1[1], l1[listIndex], listIndex from parquet_complex_types limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+[100,101]      100     101     100     0
+[102,103]      102     103     103     1
+[104,105]      104     105     104     0
+[106,107]      106     107     107     1
+[108,109]      108     109     108     0
+[110,111]      110     111     111     1
+[112,113]      112     113     112     0
+[114,115]      114     115     115     1
+[116,117]      116     117     116     0
+[118,119]      118     119     119     1
+PREHOOK: query: explain vectorization expression select sum(l1[0]), l1[1] from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select sum(l1[0]), l1[1] from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: parquet_complex_types
+            Statistics: Num rows: 1023 Data size: 5115 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+            Filter Operator
+              Filter Vectorization:
+                  className: VectorFilterOperator
+                  native: true
+                  predicateExpression: FilterLongColGreaterLongScalar(col 6:int, val 1000)(children: ListIndexColScalar(col 2:array<int>, col 0:int) -> 6:int)
+              predicate: (l1[0] > 1000) (type: boolean)
+              Statistics: Num rows: 341 Data size: 1705 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: l1[1] (type: int), l1[0] (type: int)
+                outputColumnNames: _col0, _col1
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [6, 7]
+                    selectExpressions: ListIndexColScalar(col 2:array<int>, col 1:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 0:int) -> 7:int
+                Statistics: Num rows: 341 Data size: 1705 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: sum(_col1)
+                  Group By Vectorization:
+                      aggregators: VectorUDAFSumLong(col 7:int) -> bigint
+                      className: VectorGroupByOperator
+                      groupByMode: HASH
+                      keyExpressions: col 6:int
+                      native: false
+                      vectorProcessingMode: HASH
+                      projectedOutputColumnNums: [0]
+                  keys: _col0 (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 341 Data size: 1705 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Reduce Sink Vectorization:
+                        className: VectorReduceSinkOperator
+                        native: false
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+                    Statistics: Num rows: 341 Data size: 1705 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: sum(VALUE._col0)
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 170 Data size: 850 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col1 (type: bigint), _col0 (type: int)
+            outputColumnNames: _col0, _col2
+            Statistics: Num rows: 170 Data size: 850 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            TableScan Vectorization:
+                native: true
+            Reduce Output Operator
+              key expressions: _col2 (type: int)
+              sort order: +
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+              Statistics: Num rows: 170 Data size: 850 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col0 (type: bigint)
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: bigint), KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 170 Data size: 850 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 10
+            Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(l1[0]), l1[1] from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] desc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(l1[0]), l1[1] from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] desc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+2144   2145
+2142   2143
+2140   2141
+2138   2139
+2136   2137
+2134   2135
+2132   2133
+2130   2131
+2128   2129
+2126   2127
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_complex_types
+SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging where id < 1025
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types_staging
+PREHOOK: Output: default@parquet_complex_types
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_complex_types
+SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging where id < 1025
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types_staging
+POSTHOOK: Output: default@parquet_complex_types
+POSTHOOK: Lineage: parquet_complex_types.id SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.l1 SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:l1, type:array<int>, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.listindex SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:listindex, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.m1 SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:m1, type:map<string,varchar(5)>, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.st1 SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:st1, type:struct<c1:int,c2:string>, comment:null), ]
+PREHOOK: query: select count(*) from parquet_complex_types
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from parquet_complex_types
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+1024
+PREHOOK: query: explain vectorization expression select l1, l1[0], l1[1], l1[listIndex], listIndex from parquet_complex_types limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select l1, l1[0], l1[1], l1[listIndex], listIndex from parquet_complex_types limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: parquet_complex_types
+            Statistics: Num rows: 1024 Data size: 5120 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+            Select Operator
+              expressions: l1 (type: array<int>), l1[0] (type: int), l1[1] (type: int), l1[listindex] (type: int), listindex (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4
+              Select Vectorization:
+                  className: VectorSelectOperator
+                  native: true
+                  projectedOutputColumnNums: [2, 6, 7, 8, 4]
+                  selectExpressions: ListIndexColScalar(col 2:array<int>, col 0:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 1:int) -> 7:int, ListIndexColColumn(col 2:array<int>, col 4:int) -> 8:int
+              Statistics: Num rows: 1024 Data size: 5120 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 10
+                Limit Vectorization:
+                    className: VectorLimitOperator
+                    native: true
+                Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  File Sink Vectorization:
+                      className: VectorFileSinkOperator
+                      native: false
+                  Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select l1, l1[0], l1[1], l1[listIndex], listIndex from parquet_complex_types limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select l1, l1[0], l1[1], l1[listIndex], listIndex from parquet_complex_types limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+[100,101]      100     101     100     0
+[102,103]      102     103     103     1
+[104,105]      104     105     104     0
+[106,107]      106     107     107     1
+[108,109]      108     109     108     0
+[110,111]      110     111     111     1
+[112,113]      112     113     112     0
+[114,115]      114     115     115     1
+[116,117]      116     117     116     0
+[118,119]      118     119     119     1
+PREHOOK: query: explain vectorization expression select sum(l1[0]), l1[1] from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select sum(l1[0]), l1[1] from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: parquet_complex_types
+            Statistics: Num rows: 1024 Data size: 5120 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+            Filter Operator
+              Filter Vectorization:
+                  className: VectorFilterOperator
+                  native: true
+                  predicateExpression: FilterLongColGreaterLongScalar(col 6:int, val 1000)(children: ListIndexColScalar(col 2:array<int>, col 0:int) -> 6:int)
+              predicate: (l1[0] > 1000) (type: boolean)
+              Statistics: Num rows: 341 Data size: 1705 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: l1[1] (type: int), l1[0] (type: int)
+                outputColumnNames: _col0, _col1
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [6, 7]
+                    selectExpressions: ListIndexColScalar(col 2:array<int>, col 1:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 0:int) -> 7:int
+                Statistics: Num rows: 341 Data size: 1705 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: sum(_col1)
+                  Group By Vectorization:
+                      aggregators: VectorUDAFSumLong(col 7:int) -> bigint
+                      className: VectorGroupByOperator
+                      groupByMode: HASH
+                      keyExpressions: col 6:int
+                      native: false
+                      vectorProcessingMode: HASH
+                      projectedOutputColumnNums: [0]
+                  keys: _col0 (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 341 Data size: 1705 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Reduce Sink Vectorization:
+                        className: VectorReduceSinkOperator
+                        native: false
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+                    Statistics: Num rows: 341 Data size: 1705 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: sum(VALUE._col0)
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 170 Data size: 850 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col1 (type: bigint), _col0 (type: int)
+            outputColumnNames: _col0, _col2
+            Statistics: Num rows: 170 Data size: 850 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            TableScan Vectorization:
+                native: true
+            Reduce Output Operator
+              key expressions: _col2 (type: int)
+              sort order: +
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+              Statistics: Num rows: 170 Data size: 850 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col0 (type: bigint)
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: bigint), KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 170 Data size: 850 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 10
+            Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(l1[0]), l1[1] from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] desc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(l1[0]), l1[1] from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] desc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+2146   2147
+2144   2145
+2142   2143
+2140   2141
+2138   2139
+2136   2137
+2134   2135
+2132   2133
+2130   2131
+2128   2129
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_complex_types
+SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types_staging
+PREHOOK: Output: default@parquet_complex_types
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_complex_types
+SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types_staging
+POSTHOOK: Output: default@parquet_complex_types
+POSTHOOK: Lineage: parquet_complex_types.id SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.l1 SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:l1, type:array<int>, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.listindex SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:listindex, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.m1 SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:m1, type:map<string,varchar(5)>, comment:null), ]
+POSTHOOK: Lineage: parquet_complex_types.st1 SIMPLE [(parquet_complex_types_staging)parquet_complex_types_staging.FieldSchema(name:st1, type:struct<c1:int,c2:string>, comment:null), ]
+PREHOOK: query: select count(*) from parquet_complex_types
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from parquet_complex_types
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+1025
+PREHOOK: query: explain vectorization expression select l1, l1[0], l1[1], l1[listIndex], listIndex from parquet_complex_types limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select l1, l1[0], l1[1], l1[listIndex], listIndex from parquet_complex_types limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: parquet_complex_types
+            Statistics: Num rows: 1025 Data size: 5125 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+            Select Operator
+              expressions: l1 (type: array<int>), l1[0] (type: int), l1[1] (type: int), l1[listindex] (type: int), listindex (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4
+              Select Vectorization:
+                  className: VectorSelectOperator
+                  native: true
+                  projectedOutputColumnNums: [2, 6, 7, 8, 4]
+                  selectExpressions: ListIndexColScalar(col 2:array<int>, col 0:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 1:int) -> 7:int, ListIndexColColumn(col 2:array<int>, col 4:int) -> 8:int
+              Statistics: Num rows: 1025 Data size: 5125 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 10
+                Limit Vectorization:
+                    className: VectorLimitOperator
+                    native: true
+                Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  File Sink Vectorization:
+                      className: VectorFileSinkOperator
+                      native: false
+                  Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select l1, l1[0], l1[1], l1[listIndex], listIndex from parquet_complex_types limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select l1, l1[0], l1[1], l1[listIndex], listIndex from parquet_complex_types limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+[100,101]      100     101     100     0
+[102,103]      102     103     103     1
+[104,105]      104     105     104     0
+[106,107]      106     107     107     1
+[108,109]      108     109     108     0
+[110,111]      110     111     111     1
+[112,113]      112     113     112     0
+[114,115]      114     115     115     1
+[116,117]      116     117     116     0
+[118,119]      118     119     119     1
+PREHOOK: query: explain vectorization expression select sum(l1[0]), l1[1] from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select sum(l1[0]), l1[1] from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: parquet_complex_types
+            Statistics: Num rows: 1025 Data size: 5125 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+            Filter Operator
+              Filter Vectorization:
+                  className: VectorFilterOperator
+                  native: true
+                  predicateExpression: FilterLongColGreaterLongScalar(col 6:int, val 1000)(children: ListIndexColScalar(col 2:array<int>, col 0:int) -> 6:int)
+              predicate: (l1[0] > 1000) (type: boolean)
+              Statistics: Num rows: 341 Data size: 1705 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: l1[1] (type: int), l1[0] (type: int)
+                outputColumnNames: _col0, _col1
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [6, 7]
+                    selectExpressions: ListIndexColScalar(col 2:array<int>, col 1:int) -> 6:int, ListIndexColScalar(col 2:array<int>, col 0:int) -> 7:int
+                Statistics: Num rows: 341 Data size: 1705 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: sum(_col1)
+                  Group By Vectorization:
+                      aggregators: VectorUDAFSumLong(col 7:int) -> bigint
+                      className: VectorGroupByOperator
+                      groupByMode: HASH
+                      keyExpressions: col 6:int
+                      native: false
+                      vectorProcessingMode: HASH
+                      projectedOutputColumnNums: [0]
+                  keys: _col0 (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 341 Data size: 1705 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Reduce Sink Vectorization:
+                        className: VectorReduceSinkOperator
+                        native: false
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+                    Statistics: Num rows: 341 Data size: 1705 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: sum(VALUE._col0)
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 170 Data size: 850 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col1 (type: bigint), _col0 (type: int)
+            outputColumnNames: _col0, _col2
+            Statistics: Num rows: 170 Data size: 850 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            TableScan Vectorization:
+                native: true
+            Reduce Output Operator
+              key expressions: _col2 (type: int)
+              sort order: +
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+              Statistics: Num rows: 170 Data size: 850 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col0 (type: bigint)
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: bigint), KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 170 Data size: 850 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 10
+            Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(l1[0]), l1[1] from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] desc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(l1[0]), l1[1] from parquet_complex_types where l1[0] > 1000 group by l1[1] order by l1[1] desc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_complex_types
+#### A masked pattern was here ####
+2148   2149
+2146   2147
+2144   2145
+2142   2143
+2140   2141
+2138   2139
+2136   2137
+2134   2135
+2132   2133
+2130   2131

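For reference, a minimal sketch of the statements driving the golden file above, reconstructed from its PREHOOK: query lines. The SET line is an assumption inferred from the plan's "enabledConditionsMet: [hive.vectorized.execution.enabled IS true]"; the actual qfile may carry further settings.

    SET hive.vectorized.execution.enabled=true;  -- assumed, per the plan header
    DROP TABLE parquet_complex_types_staging;
    DROP TABLE parquet_complex_types;
    CREATE TABLE parquet_complex_types_staging (
      id int, m1 map<string, varchar(5)>, l1 array<int>,
      st1 struct<c1:int, c2:string>, listIndex int
    ) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
      COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':';
    CREATE TABLE parquet_complex_types (
      id int, m1 map<string, varchar(5)>, l1 array<int>,
      st1 struct<c1:int, c2:string>, listIndex int
    ) STORED AS PARQUET;
    LOAD DATA LOCAL INPATH '../../data/files/parquet_complex_types.txt'
      OVERWRITE INTO TABLE parquet_complex_types_staging;
    -- Run three times, loading 1023, 1024, and 1025 rows
    -- (WHERE id < 1024, WHERE id < 1025, then no filter):
    INSERT OVERWRITE TABLE parquet_complex_types
    SELECT id, m1, l1, st1, listIndex FROM parquet_complex_types_staging WHERE id < 1024;
    SELECT count(*) FROM parquet_complex_types;
    EXPLAIN VECTORIZATION EXPRESSION
    SELECT l1, l1[0], l1[1], l1[listIndex], listIndex FROM parquet_complex_types LIMIT 10;
    SELECT l1, l1[0], l1[1], l1[listIndex], listIndex FROM parquet_complex_types LIMIT 10;
    EXPLAIN VECTORIZATION EXPRESSION
    SELECT sum(l1[0]), l1[1] FROM parquet_complex_types
    WHERE l1[0] > 1000 GROUP BY l1[1] ORDER BY l1[1] LIMIT 10;
    SELECT sum(l1[0]), l1[1] FROM parquet_complex_types
    WHERE l1[0] > 1000 GROUP BY l1[1] ORDER BY l1[1] DESC LIMIT 10;

The plans show the point of the test: a constant subscript such as l1[0] compiles to ListIndexColScalar, while the column subscript l1[listIndex] compiles to ListIndexColColumn, and the three row counts presumably exercise boundaries around the default 1024-row vectorized batch.
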
http://git-wip-us.apache.org/repos/asf/hive/blob/b8aa16ff/ql/src/test/results/clientpositive/vector_complex_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_complex_join.q.out b/ql/src/test/results/clientpositive/vector_complex_join.q.out
index 23ae87e..7677697 100644
--- a/ql/src/test/results/clientpositive/vector_complex_join.q.out
+++ b/ql/src/test/results/clientpositive/vector_complex_join.q.out
@@ -136,24 +136,25 @@ POSTHOOK: Input: default@test
 #### A masked pattern was here ####
 alltypesorc.ctinyint   alltypesorc.csmallint   alltypesorc.cint        alltypesorc.cbigint     alltypesorc.cfloat      alltypesorc.cdouble     alltypesorc.cstring1    alltypesorc.cstring2    alltypesorc.ctimestamp1 alltypesorc.ctimestamp2 alltypesorc.cboolean1   alltypesorc.cboolean2   test.a  test.b
 -51    NULL    199408978       -1800989684     -51.0   NULL    34N4EY63M1GFWuW0boW     P4PL5h1eXR4mMLr2        1969-12-31 16:00:08.451 NULL    false   true    199408978       {1:"val_1",2:"val_2"}
-PREHOOK: query: CREATE TABLE test2a (a ARRAY<INT>) STORED AS ORC
+PREHOOK: query: CREATE TABLE test2a (a ARRAY<INT>, index INT) STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@test2a
-POSTHOOK: query: CREATE TABLE test2a (a ARRAY<INT>) STORED AS ORC
+POSTHOOK: query: CREATE TABLE test2a (a ARRAY<INT>, index INT) STORED AS ORC
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@test2a
-PREHOOK: query: INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2) FROM src LIMIT 1
+PREHOOK: query: INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2), 1 FROM src LIMIT 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@test2a
-POSTHOOK: query: INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2) FROM src LIMIT 1
+POSTHOOK: query: INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2), 1 FROM src LIMIT 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@test2a
 POSTHOOK: Lineage: test2a.a EXPRESSION []
-_c0
+POSTHOOK: Lineage: test2a.index SIMPLE []
+_c0    _c1
 PREHOOK: query: CREATE TABLE test2b (a INT) STORED AS ORC
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -211,35 +212,57 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: test2a
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
             Filter Operator
+              Filter Vectorization:
+                  className: VectorFilterOperator
+                  native: true
+                  predicateExpression: SelectColumnIsNotNull(col 3:int)(children: ListIndexColScalar(col 0:array<int>, col 1:int) -> 3:int)
               predicate: a[1] is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Map Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
                   0 a (type: int)
                   1 a[1] (type: int)
-                outputColumnNames: _col0, _col4
+                Map Join Vectorization:
+                    className: VectorMapJoinOperator
+                    native: false
+                    nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true
+                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+                outputColumnNames: _col0, _col4, _col5
                 Statistics: Num rows: 3 Data size: 13 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: int), _col4 (type: array<int>)
-                  outputColumnNames: _col0, _col1
+                  expressions: _col0 (type: int), _col4 (type: array<int>), _col5 (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumnNums: [0, 1, 2]
                   Statistics: Num rows: 3 Data size: 13 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
                     Statistics: Num rows: 3 Data size: 13 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
       Map Vectorization:
           enabled: true
           enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
           inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-          notVectorizedReason: FILTER operator: Unexpected hive type name array<int>
-          vectorized: false
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Local Work:
         Map Reduce Local Work
 
@@ -259,5 +282,118 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test2a
 POSTHOOK: Input: default@test2b
 #### A masked pattern was here ####
-test2b.a       test2a.a
-2      [1,2]
+test2b.a       test2a.a        test2a.index
+2      [1,2]   1
+PREHOOK: query: explain vectorization expression
+select *  from test2b join test2a on test2b.a = test2a.a[test2a.index]
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression
+select *  from test2b join test2a on test2b.a = test2a.a[test2a.index]
+POSTHOOK: type: QUERY
+Explain
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        test2b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        test2b 
+          TableScan
+            alias: test2b
+            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: a is not null (type: boolean)
+              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+              HashTable Sink Operator
+                keys:
+                  0 a (type: int)
+                  1 a[index] (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: test2a
+            Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+            Filter Operator
+              Filter Vectorization:
+                  className: VectorFilterOperator
+                  native: true
+                  predicateExpression: SelectColumnIsNotNull(col 3:int)(children: ListIndexColColumn(col 0:array<int>, col 1:int) -> 3:int)
+              predicate: a[index] is not null (type: boolean)
+              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 a (type: int)
+                  1 a[index] (type: int)
+                Map Join Vectorization:
+                    className: VectorMapJoinOperator
+                    native: false
+                    nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true
+                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+                outputColumnNames: _col0, _col4, _col5
+                Statistics: Num rows: 3 Data size: 13 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col4 (type: array<int>), _col5 (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumnNums: [0, 1, 2]
+                  Statistics: Num rows: 3 Data size: 13 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
+                    Statistics: Num rows: 3 Data size: 13 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select *  from test2b join test2a on test2b.a = test2a.a[test2a.index]
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test2a
+PREHOOK: Input: default@test2b
+#### A masked pattern was here ####
+POSTHOOK: query: select *  from test2b join test2a on test2b.a = test2a.a[test2a.index]
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test2a
+POSTHOOK: Input: default@test2b
+#### A masked pattern was here ####
+test2b.a       test2a.a        test2a.index
+2      [1,2]   1

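For reference, a minimal sketch of the updated test2a case in vector_complex_join.q, reconstructed from the PREHOOK: query lines in this diff (surrounding settings in the qfile are not shown here):

    CREATE TABLE test2a (a ARRAY<INT>, index INT) STORED AS ORC;
    INSERT OVERWRITE TABLE test2a SELECT ARRAY(1, 2), 1 FROM src LIMIT 1;
    -- constant subscript, now vectorized via ListIndexColScalar; the old
    -- golden file showed notVectorizedReason: FILTER operator: Unexpected
    -- hive type name array<int>
    SELECT * FROM test2b JOIN test2a ON test2b.a = test2a.a[1];
    -- column subscript, the new case, vectorized via ListIndexColColumn
    SELECT * FROM test2b JOIN test2a ON test2b.a = test2a.a[test2a.index];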