Repository: hive
Updated Branches:
  refs/heads/master 284c8f3bf -> 7fc60962f


http://git-wip-us.apache.org/repos/asf/hive/blob/7fc60962/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive.q.out
index 03c6936..757ea3a 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_part_all_primitive.q.out
@@ -242,25 +242,49 @@ POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=
 POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint 
PARTITION(part=1).c9 SIMPLE 
[(schema_evolution_data)schema_evolution_data.FieldSchema(name:boolean1, 
type:boolean, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint 
PARTITION(part=1).insert_num SIMPLE 
[(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, 
type:int, comment:null), ]
 insert_num     boolean1        boolean1        boolean1        boolean1        
boolean1        boolean1        boolean1        boolean1        boolean1        
tinyint1        tinyint1        tinyint1        tinyint1        tinyint1        
tinyint1        tinyint1        tinyint1        tinyint1        tinyint1        
tinyint1        smallint1       smallint1       smallint1       smallint1       
smallint1       smallint1       smallint1       smallint1       smallint1       
smallint1       smallint1       int1    int1    int1    int1    int1    int1    
int1    int1    int1    int1    int1    bigint1 bigint1 bigint1 bigint1 bigint1 
bigint1 bigint1 bigint1 bigint1 bigint1 bigint1 _c54
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b
 from part_change_various_various_boolean_to_bigint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b
 from part_change_various_various_boolean_to_bigint
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: false
+  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=10 width=4)
-          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43","_col44","_col45","_col46","_col47","_col48","_col49","_col50","_col51","_col52","_col53","_col54","_col55"]
-          TableScan [TS_0] (rows=10 width=512)
-            
default@part_change_various_various_boolean_to_bigint,part_change_various_various_boolean_to_bigint,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","c21","c22","c23","c24","c25","c26","c27","c28","c29","c30","c31","c32","c33","c34","c35","c36","c37","c38","c39","c40","c41","c42","c43","c44","c45","c46","c47","c48","c49","c50","c51","c52","c53","b"]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_change_various_various_boolean_to_bigint
+                  Statistics: Num rows: 10 Data size: 5126 Basic stats: 
COMPLETE Column stats: PARTIAL
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), c1 
(type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5 
(type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9 
(type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint), 
c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type: 
tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20 
(type: tinyint), c21 (type: smallint), c22 (type: smallint), c23 (type: 
smallint), c24 (type: smallint), c25 (type: smallint), c26 (type: smallint), 
c27 (type: smallint), c28 (type: smallint), c29 (type: smallint), c30 (type: 
smallint), c31 (type: smallint), c32 (type: int), c33 (type: int), c34 (type: 
int), c35 (type: int), c36 (type: int), c37 (type: int), c38 (type: int), c39 
(type: int), c40 (type: int), c41 (type: int), c42 (type: int), c43 (type: 
bigint), c44 (type: bigint), c45 (type: bigint), c46 (type: bigint), c4
 7 (type: bigint), c48 (type: bigint), c49 (type: bigint), c50 (type: bigint), 
c51 (type: bigint), c52 (type: bigint), c53 (type: bigint), b (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, 
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, 
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, 
_col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, 
_col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54, 
_col55
+                    Statistics: Num rows: 10 Data size: 40 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 10 Data size: 40 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b
 from part_change_various_various_boolean_to_bigint
 PREHOOK: type: QUERY
@@ -443,25 +467,49 @@ POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=
 POSTHOOK: Lineage: part_change_various_various_decimal_to_double 
PARTITION(part=1).c9 SIMPLE 
[(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:decimal1, 
type:decimal(38,18), comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_decimal_to_double 
PARTITION(part=1).insert_num SIMPLE 
[(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, 
type:int, comment:null), ]
 insert_num     decimal1        decimal1        decimal1        decimal1        
decimal1        decimal1        decimal1        decimal1        decimal1        
decimal1        decimal1        float1  float1  float1  float1  float1  float1  
float1  float1  float1  float1  float1  double1 double1 double1 double1 double1 
double1 double1 double1 double1 double1 double1 _c34
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b
 from part_change_various_various_decimal_to_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b
 from part_change_various_various_decimal_to_double
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: false
+  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=6 width=4)
-          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35"]
-          TableScan [TS_0] (rows=6 width=455)
-            
default@part_change_various_various_decimal_to_double,part_change_various_various_decimal_to_double,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","c21","c22","c23","c24","c25","c26","c27","c28","c29","c30","c31","c32","c33","b"]
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_change_various_various_decimal_to_double
+                  Statistics: Num rows: 6 Data size: 2735 Basic stats: 
COMPLETE Column stats: PARTIAL
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), c1 
(type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)), 
c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type: 
decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9 
(type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)), 
c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16 
(type: float), c17 (type: float), c18 (type: float), c19 (type: float), c20 
(type: float), c21 (type: float), c22 (type: float), c23 (type: double), c24 
(type: double), c25 (type: double), c26 (type: double), c27 (type: double), c28 
(type: double), c29 (type: double), c30 (type: double), c31 (type: double), c32 
(type: double), c33 (type: double), b (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, 
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, 
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, 
_col35
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b
 from part_change_various_various_decimal_to_double
 PREHOOK: type: QUERY
@@ -566,25 +614,49 @@ POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).c8 SI
 POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).c9 
SIMPLE 
[(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:timestamp1, 
type:timestamp, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_timestamp 
PARTITION(part=1).insert_num SIMPLE 
[(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, 
type:int, comment:null), ]
 insert_num     timestamp1      timestamp1      timestamp1      timestamp1      
timestamp1      timestamp1      timestamp1      timestamp1      timestamp1      
timestamp1      timestamp1      timestamp1      _c13
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from 
part_change_various_various_timestamp
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from 
part_change_various_various_timestamp
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: false
+  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=6 width=4)
-          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
-          TableScan [TS_0] (rows=6 width=151)
-            
default@part_change_various_various_timestamp,part_change_various_various_timestamp,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","b"]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_change_various_various_timestamp
+                  Statistics: Num rows: 6 Data size: 907 Basic stats: COMPLETE 
Column stats: PARTIAL
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), c1 
(type: timestamp), c2 (type: timestamp), c3 (type: timestamp), c4 (type: 
timestamp), c5 (type: timestamp), c6 (type: timestamp), c7 (type: timestamp), 
c8 (type: timestamp), c9 (type: timestamp), c10 (type: timestamp), c11 (type: 
timestamp), c12 (type: timestamp), b (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from 
part_change_various_various_timestamp
 PREHOOK: type: QUERY
@@ -673,25 +745,49 @@ POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).c3 SIMPLE
 POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).c4 
SIMPLE 
[(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:date1, 
type:date, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_date 
PARTITION(part=1).insert_num SIMPLE 
[(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, 
type:int, comment:null), ]
 insert_num     date1   date1   date1   date1   _c5
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: false
+  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_change_various_various_date
+                  Statistics: Num rows: 6 Data size: 461 Basic stats: COMPLETE 
Column stats: PARTIAL
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), c1 
(type: date), c2 (type: date), c3 (type: date), c4 (type: date), b (type: 
string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=6 width=4)
-          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-          TableScan [TS_0] (rows=6 width=76)
-            
default@part_change_various_various_date,part_change_various_various_date,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","b"]
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,b from 
part_change_various_various_date
 PREHOOK: type: QUERY
@@ -861,25 +957,49 @@ POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).c5 S
 POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).c6 
SIMPLE [(same_type1_c_txt)same_type1_c_txt.FieldSchema(name:c6, 
type:decimal(25,15), comment:null), ]
 POSTHOOK: Lineage: part_change_same_type_different_params 
PARTITION(part=2).insert_num SIMPLE 
[(same_type1_c_txt)same_type1_c_txt.FieldSchema(name:insert_num, type:int, 
comment:null), ]
 same_type1_c_txt.insert_num    same_type1_c_txt.c1     same_type1_c_txt.c2     
same_type1_c_txt.c3     same_type1_c_txt.c4     same_type1_c_txt.c5     
same_type1_c_txt.c6     same_type1_c_txt.b
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from 
part_change_same_type_different_params
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from 
part_change_same_type_different_params
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: false
+  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_change_same_type_different_params
+                  Statistics: Num rows: 13 Data size: 1427 Basic stats: 
COMPLETE Column stats: PARTIAL
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), c1 
(type: char(8)), c2 (type: char(32)), c3 (type: varchar(15)), c4 (type: 
varchar(18)), c5 (type: decimal(10,2)), c6 (type: decimal(25,15)), b (type: 
string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8
+                    Statistics: Num rows: 13 Data size: 52 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 13 Data size: 52 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=13 width=4)
-          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
-          TableScan [TS_0] (rows=13 width=109)
-            
default@part_change_same_type_different_params,part_change_same_type_different_params,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","b"]
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,b from 
part_change_same_type_different_params
 PREHOOK: type: QUERY
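Note: the plans above come from the full "explain vectorization detail" form rather than the old compact CBO summary, so the PLAN VECTORIZATION block and the stage plans are printed even though vectorization is off. A minimal sketch of how output of this shape is produced, using a query that appears in this diff (the SET line simply mirrors the enabledConditionsNotMet entry above; the actual test harness setup may differ):

    -- vectorization reported as disabled, matching "enabled: false" above
    SET hive.vectorized.execution.enabled=false;
    EXPLAIN VECTORIZATION DETAIL
    SELECT insert_num, part, c1, c2, c3, c4, b
    FROM part_change_various_various_date;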

http://git-wip-us.apache.org/repos/asf/hive/blob/7fc60962/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table.q.out
index da99110..e0a4ffb 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_nonvec_table.q.out
@@ -55,25 +55,49 @@ POSTHOOK: Lineage: table_add_int_permute_select.b SIMPLE [(values__tmp__table__1
 POSTHOOK: Lineage: table_add_int_permute_select.c EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, 
type:string, comment:), ]
 POSTHOOK: Lineage: table_add_int_permute_select.insert_num EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
 _col0  _col1   _col2   _col3
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,a,b from table_add_int_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,a,b from table_add_int_permute_select
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: false
+  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=6 width=20)
-          Output:["_col0","_col1","_col2"]
-          TableScan [TS_0] (rows=6 width=20)
-            
default@table_add_int_permute_select,table_add_int_permute_select,Tbl:COMPLETE,Col:NONE,Output:["insert_num","a","b"]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: table_add_int_permute_select
+                  Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE 
Column stats: NONE
+                  Select Operator
+                    expressions: insert_num (type: int), a (type: int), b 
(type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 6 Data size: 120 Basic stats: 
COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 6 Data size: 120 Basic stats: 
COMPLETE Column stats: NONE
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,a,b from table_add_int_permute_select
 PREHOOK: type: QUERY
@@ -168,25 +192,49 @@ POSTHOOK: Lineage: table_add_int_string_permute_select.c EXPRESSION [(values__tm
 POSTHOOK: Lineage: table_add_int_string_permute_select.d SIMPLE 
[(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col5, 
type:string, comment:), ]
 POSTHOOK: Lineage: table_add_int_string_permute_select.insert_num EXPRESSION 
[(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,a,b from table_add_int_string_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,a,b from table_add_int_string_permute_select
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: false
+  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=6 width=21)
-          Output:["_col0","_col1","_col2"]
-          TableScan [TS_0] (rows=6 width=21)
-            
default@table_add_int_string_permute_select,table_add_int_string_permute_select,Tbl:COMPLETE,Col:NONE,Output:["insert_num","a","b"]
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: table_add_int_string_permute_select
+                  Statistics: Num rows: 6 Data size: 127 Basic stats: COMPLETE 
Column stats: NONE
+                  Select Operator
+                    expressions: insert_num (type: int), a (type: int), b 
(type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 6 Data size: 127 Basic stats: 
COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 6 Data size: 127 Basic stats: 
COMPLETE Column stats: NONE
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,a,b from table_add_int_string_permute_select
 PREHOOK: type: QUERY
@@ -343,25 +391,49 @@ POSTHOOK: Lineage: table_change_string_group_double.c2 EXPRESSION [(values__tmp_
 POSTHOOK: Lineage: table_change_string_group_double.c3 EXPRESSION 
[(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col4, 
type:string, comment:), ]
 POSTHOOK: Lineage: table_change_string_group_double.insert_num EXPRESSION 
[(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,c1,c2,c3,b from table_change_string_group_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,c1,c2,c3,b from table_change_string_group_double
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: false
+  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=6 width=80)
-          Output:["_col0","_col1","_col2","_col3","_col4"]
-          TableScan [TS_0] (rows=6 width=80)
-            
default@table_change_string_group_double,table_change_string_group_double,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","b"]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: table_change_string_group_double
+                  Statistics: Num rows: 6 Data size: 482 Basic stats: COMPLETE 
Column stats: NONE
+                  Select Operator
+                    expressions: insert_num (type: int), c1 (type: double), c2 
(type: double), c3 (type: double), b (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                    Statistics: Num rows: 6 Data size: 482 Basic stats: 
COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 6 Data size: 482 Basic stats: 
COMPLETE Column stats: NONE
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,c1,c2,c3,b from 
table_change_string_group_double
 PREHOOK: type: QUERY
@@ -587,25 +659,49 @@ POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_gro
 POSTHOOK: Lineage: 
table_change_numeric_group_string_group_multi_ints_string_group.c9 EXPRESSION 
[(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col10,
 type:string, comment:), ]
 POSTHOOK: Lineage: 
table_change_numeric_group_string_group_multi_ints_string_group.insert_num 
EXPRESSION 
[(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4   _col5   _col6   _col7   _col8   _col9   
_col10  _col11  _col12  _col13  _col14  _col15  _col16  _col17  _col18  _col19  
_col20  _col21
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select 
insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b
 from table_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select 
insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b
 from table_change_numeric_group_string_group_multi_ints_string_group
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: false
+  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: 
table_change_numeric_group_string_group_multi_ints_string_group
+                  Statistics: Num rows: 6 Data size: 1070 Basic stats: 
COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: insert_num (type: int), c1 (type: string), c2 
(type: string), c3 (type: string), c4 (type: string), c5 (type: char(50)), c6 
(type: char(50)), c7 (type: char(50)), c8 (type: char(50)), c9 (type: char(5)), 
c10 (type: char(5)), c11 (type: char(5)), c12 (type: char(5)), c13 (type: 
varchar(50)), c14 (type: varchar(50)), c15 (type: varchar(50)), c16 (type: 
varchar(50)), c17 (type: varchar(5)), c18 (type: varchar(5)), c19 (type: 
varchar(5)), c20 (type: varchar(5)), b (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, 
_col15, _col16, _col17, _col18, _col19, _col20, _col21
+                    Statistics: Num rows: 6 Data size: 1070 Basic stats: 
COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 6 Data size: 1070 Basic stats: 
COMPLETE Column stats: NONE
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=6 width=178)
-          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21"]
-          TableScan [TS_0] (rows=6 width=178)
-            
default@table_change_numeric_group_string_group_multi_ints_string_group,table_change_numeric_group_string_group_multi_ints_string_group,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","b"]
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select 
insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b
 from table_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
@@ -742,25 +838,49 @@ POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group
 POSTHOOK: Lineage: 
table_change_numeric_group_string_group_floating_string_group.c9 EXPRESSION 
[(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col10,
 type:string, comment:), ]
 POSTHOOK: Lineage: 
table_change_numeric_group_string_group_floating_string_group.insert_num 
EXPRESSION 
[(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4   _col5   _col6   _col7   _col8   _col9   
_col10  _col11  _col12  _col13  _col14  _col15  _col16
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from 
table_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from 
table_change_numeric_group_string_group_floating_string_group
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: false
+  enabledConditionsNotMet: [hive.vectorized.execution.enabled IS false]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: 
table_change_numeric_group_string_group_floating_string_group
+                  Statistics: Num rows: 6 Data size: 1497 Basic stats: 
COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: insert_num (type: int), c1 (type: string), c2 
(type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6 
(type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)), 
c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13 
(type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type: 
string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, 
_col15, _col16
+                    Statistics: Num rows: 6 Data size: 1497 Basic stats: 
COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 6 Data size: 1497 Basic stats: 
COMPLETE Column stats: NONE
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=6 width=249)
-          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16"]
-          TableScan [TS_0] (rows=6 width=249)
-            
default@table_change_numeric_group_string_group_floating_string_group,table_change_numeric_group_string_group_floating_string_group,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","b"]
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select 
insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from 
table_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/7fc60962/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_bucket.q.out b/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
index 7917ffd..18dcce2 100644
--- a/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
@@ -57,7 +57,7 @@ STAGE PLANS:
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
                 groupByVectorOutput: true
                 allNative: false
-                usesVectorUDFAdaptor: true
+                usesVectorUDFAdaptor: false
                 vectorized: true
             Reduce Operator Tree:
               Select Operator
@@ -67,7 +67,7 @@ STAGE PLANS:
                     className: VectorSelectOperator
                     native: true
                     projectedOutputColumns: [2, 1]
-                    selectExpressions: 
VectorUDFAdaptor(UDFToInteger(VALUE._col0)) -> 2:int
+                    selectExpressions: CastStringToLong(col 0) -> 2:int
                 Statistics: Num rows: 1 Data size: 26 Basic stats: COMPLETE 
Column stats: NONE
                 File Output Operator
                   compressed: false

http://git-wip-us.apache.org/repos/asf/hive/blob/7fc60962/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
index c2d0c42..98db4c1 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
@@ -91,7 +91,7 @@ STAGE PLANS:
                         className: VectorSelectOperator
                         native: true
                         projectedOutputColumns: [1, 2]
-                        selectExpressions: VectorUDFAdaptor(UDFToInteger(key)) 
-> 2:int
+                        selectExpressions: CastStringToLong(col 0) -> 2:int
                     Statistics: Num rows: 500 Data size: 99000 Basic stats: 
COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_col1), count()
@@ -126,7 +126,7 @@ STAGE PLANS:
                 groupByVectorOutput: true
                 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
-                usesVectorUDFAdaptor: true
+                usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
@@ -289,7 +289,7 @@ STAGE PLANS:
                         className: VectorSelectOperator
                         native: true
                         projectedOutputColumns: [1, 2]
-                        selectExpressions: VectorUDFAdaptor(UDFToInteger(key)) 
-> 2:int
+                        selectExpressions: CastStringToLong(col 0) -> 2:int
                     Statistics: Num rows: 500 Data size: 99000 Basic stats: 
COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_col1), count()
@@ -324,7 +324,7 @@ STAGE PLANS:
                 groupByVectorOutput: true
                 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
-                usesVectorUDFAdaptor: true
+                usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
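Note: the change above swaps the generic VectorUDFAdaptor wrapper for the native CastStringToLong vector expression when a char/string column is cast to int, which is why usesVectorUDFAdaptor flips to false for the vertex. A hedged sketch of a query with the same shape as this plan (table and column names are illustrative assumptions, not taken from the test):

    -- assumed table char_2 with a char key column; the cast drives CastStringToLong
    EXPLAIN VECTORIZATION DETAIL
    SELECT value, SUM(CAST(key AS INT)), COUNT(*)
    FROM char_2
    GROUP BY value;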

http://git-wip-us.apache.org/repos/asf/hive/blob/7fc60962/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out b/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
index ae6aecc..5a283bc 100644
--- a/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
@@ -219,7 +219,7 @@ STAGE PLANS:
                         className: VectorSelectOperator
                         native: true
                         projectedOutputColumns: [1, 4]
-                        selectExpressions: 
VectorUDFAdaptor(UDFToInteger(COALESCE(str1,0)))(children: 
VectorCoalesce(columns [0, 2])(children: col 0, ConstantVectorExpression(val 0) 
-> 2:string) -> 3:string) -> 4:int
+                        selectExpressions: CastStringToLong(col 3)(children: 
VectorCoalesce(columns [0, 2])(children: col 0, ConstantVectorExpression(val 0) 
-> 2:string) -> 3:string) -> 4:int
                     Statistics: Num rows: 4 Data size: 510 Basic stats: 
COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_col1)
@@ -252,7 +252,7 @@ STAGE PLANS:
                 groupByVectorOutput: true
                 inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
-                usesVectorUDFAdaptor: true
+                usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
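Note: here the same native cast sits on top of a nested child expression: CastStringToLong now consumes the VectorCoalesce output directly instead of routing through VectorUDFAdaptor. A sketch of a query with that shape (table name and grouping column are illustrative assumptions):

    -- assumed string columns str1, str2; COALESCE feeds the cast to int, as in the plan above
    EXPLAIN VECTORIZATION DETAIL
    SELECT str2, SUM(CAST(COALESCE(str1, 0) AS INT))
    FROM scratch
    GROUP BY str2;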

http://git-wip-us.apache.org/repos/asf/hive/blob/7fc60962/ql/src/test/results/clientpositive/vector_char_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_2.q.out b/ql/src/test/results/clientpositive/vector_char_2.q.out
index 8c5dd3e..adfd5f0 100644
--- a/ql/src/test/results/clientpositive/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_2.q.out
@@ -85,7 +85,7 @@ STAGE PLANS:
                   className: VectorSelectOperator
                   native: true
                   projectedOutputColumns: [1, 2]
-                  selectExpressions: VectorUDFAdaptor(UDFToInteger(key)) -> 
2:int
+                  selectExpressions: CastStringToLong(col 0) -> 2:int
               Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE 
Column stats: NONE
               Group By Operator
                 aggregations: sum(_col1), count()
@@ -119,7 +119,7 @@ STAGE PLANS:
           groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
           allNative: false
-          usesVectorUDFAdaptor: true
+          usesVectorUDFAdaptor: false
           vectorized: true
       Reduce Vectorization:
           enabled: false
@@ -263,7 +263,7 @@ STAGE PLANS:
                   className: VectorSelectOperator
                   native: true
                   projectedOutputColumns: [1, 2]
-                  selectExpressions: VectorUDFAdaptor(UDFToInteger(key)) -> 
2:int
+                  selectExpressions: CastStringToLong(col 0) -> 2:int
               Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE 
Column stats: NONE
               Group By Operator
                 aggregations: sum(_col1), count()
@@ -297,7 +297,7 @@ STAGE PLANS:
           groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
           allNative: false
-          usesVectorUDFAdaptor: true
+          usesVectorUDFAdaptor: false
           vectorized: true
       Reduce Vectorization:
           enabled: false

http://git-wip-us.apache.org/repos/asf/hive/blob/7fc60962/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_coalesce_2.q.out b/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
index 12d1aaa..1518235 100644
--- a/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
@@ -198,7 +198,7 @@ STAGE PLANS:
                   className: VectorSelectOperator
                   native: true
                   projectedOutputColumns: [1, 4]
-                  selectExpressions: 
VectorUDFAdaptor(UDFToInteger(COALESCE(str1,0)))(children: 
VectorCoalesce(columns [0, 2])(children: col 0, ConstantVectorExpression(val 0) 
-> 2:string) -> 3:string) -> 4:int
+                  selectExpressions: CastStringToLong(col 3)(children: 
VectorCoalesce(columns [0, 2])(children: col 0, ConstantVectorExpression(val 0) 
-> 2:string) -> 3:string) -> 4:int
               Statistics: Num rows: 4 Data size: 510 Basic stats: COMPLETE 
Column stats: NONE
               Group By Operator
                 aggregations: sum(_col1)
@@ -231,7 +231,7 @@ STAGE PLANS:
           groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
           allNative: false
-          usesVectorUDFAdaptor: true
+          usesVectorUDFAdaptor: false
           vectorized: true
       Reduce Vectorization:
           enabled: false
