http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
index 7687cff..dc80037 100644
--- a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
@@ -29,21 +29,40 @@ STAGE PLANS:
                 TableScan
                   alias: li
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15]
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterExprAndExpr(children: 
FilterLongColEqualLongScalar(col 3, val 1) -> boolean, 
SelectColumnIsNotNull(col 1) -> boolean, SelectColumnIsNotNull(col 0) -> 
boolean) -> boolean
                     predicate: ((l_linenumber = 1) and l_partkey is not null 
and l_orderkey is not null) (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: 
COMPLETE Column stats: NONE
                     Select Operator
                       expressions: l_orderkey (type: int), l_partkey (type: 
int), l_suppkey (type: int)
                       outputColumnNames: _col0, _col1, _col2
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumns: [0, 1, 2]
                       Statistics: Num rows: 50 Data size: 5999 Basic stats: 
COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
+                        Spark Hash Table Sink Vectorization:
+                            className: VectorSparkHashTableSinkOperator
+                            native: true
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
+            Execution mode: vectorized
             Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
+                groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+                allNative: true
+                usesVectorUDFAdaptor: false
+                vectorized: true
             Local Work:
               Map Reduce Local Work
         Map 4 
@@ -51,30 +70,51 @@ STAGE PLANS:
                 TableScan
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15]
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterExprAndExpr(children: 
FilterStringGroupColEqualStringScalar(col 14, val AIR) -> boolean, 
SelectColumnIsNotNull(col 0) -> boolean) -> boolean
                     predicate: ((l_shipmode = 'AIR') and l_orderkey is not 
null) (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: 
COMPLETE Column stats: NONE
                     Select Operator
                       expressions: l_orderkey (type: int)
                       outputColumnNames: _col0
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumns: [0]
                       Statistics: Num rows: 50 Data size: 5999 Basic stats: 
COMPLETE Column stats: NONE
                       Group By Operator
                         Group By Vectorization:
-                            vectorOutput: false
+                            className: VectorGroupByOperator
+                            vectorOutput: true
+                            keyExpressions: col 0
                             native: false
-                            projectedOutputColumns: null
+                            projectedOutputColumns: []
                         keys: _col0 (type: int)
                         mode: hash
                         outputColumnNames: _col0
                         Statistics: Num rows: 50 Data size: 5999 Basic stats: 
COMPLETE Column stats: NONE
                         Spark HashTable Sink Operator
+                          Spark Hash Table Sink Vectorization:
+                              className: VectorSparkHashTableSinkOperator
+                              native: true
                           keys:
                             0 _col1 (type: int)
                             1 _col0 (type: int)
+            Execution mode: vectorized
             Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
+                groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
             Local Work:
               Map Reduce Local Work
 
@@ -89,14 +129,23 @@ STAGE PLANS:
                 TableScan
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15]
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: SelectColumnIsNotNull(col 1) -> 
boolean
                     predicate: l_partkey is not null (type: boolean)
                     Statistics: Num rows: 100 Data size: 11999 Basic stats: 
COMPLETE Column stats: NONE
                     Group By Operator
                       Group By Vectorization:
-                          vectorOutput: false
+                          className: VectorGroupByOperator
+                          vectorOutput: true
+                          keyExpressions: col 1
                           native: false
-                          projectedOutputColumns: null
+                          projectedOutputColumns: []
                       keys: l_partkey (type: int)
                       mode: hash
                       outputColumnNames: _col0
@@ -105,11 +154,20 @@ STAGE PLANS:
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
+                        Reduce Sink Vectorization:
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, 
BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 100 Data size: 11999 Basic 
stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
             Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
+                groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
         Reducer 2 
             Execution mode: vectorized
             Local Work:
@@ -241,21 +299,41 @@ STAGE PLANS:
                 TableScan
                   alias: li
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15]
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterExprAndExpr(children: 
FilterLongColEqualLongScalar(col 3, val 1) -> boolean, 
SelectColumnIsNotNull(col 1) -> boolean) -> boolean
                     predicate: ((l_linenumber = 1) and l_partkey is not null) 
(type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: 
COMPLETE Column stats: NONE
                     Select Operator
                       expressions: l_orderkey (type: int), l_partkey (type: 
int), l_suppkey (type: int), 1 (type: int)
                       outputColumnNames: _col0, _col1, _col2, _col3
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumns: [0, 1, 2, 16]
+                          selectExpressions: ConstantVectorExpression(val 1) 
-> 16:long
                       Statistics: Num rows: 50 Data size: 5999 Basic stats: 
COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
+                        Spark Hash Table Sink Vectorization:
+                            className: VectorSparkHashTableSinkOperator
+                            native: true
                         keys:
                           0 _col0 (type: int)
                           1 _col1 (type: int)
+            Execution mode: vectorized
             Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
+                groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+                allNative: true
+                usesVectorUDFAdaptor: false
+                vectorized: true
             Local Work:
               Map Reduce Local Work
         Map 4 
@@ -263,30 +341,51 @@ STAGE PLANS:
                 TableScan
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15]
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterExprAndExpr(children: 
FilterStringGroupColEqualStringScalar(col 14, val AIR) -> boolean, 
FilterLongColEqualLongColumn(col 3, col 3) -> boolean) -> boolean
                     predicate: ((l_shipmode = 'AIR') and (l_linenumber = 
l_linenumber)) (type: boolean)
                     Statistics: Num rows: 25 Data size: 2999 Basic stats: 
COMPLETE Column stats: NONE
                     Select Operator
                       expressions: l_orderkey (type: int), l_linenumber (type: 
int)
                       outputColumnNames: _col0, _col1
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumns: [0, 3]
                       Statistics: Num rows: 25 Data size: 2999 Basic stats: 
COMPLETE Column stats: NONE
                       Group By Operator
                         Group By Vectorization:
-                            vectorOutput: false
+                            className: VectorGroupByOperator
+                            vectorOutput: true
+                            keyExpressions: col 0, col 3
                             native: false
-                            projectedOutputColumns: null
+                            projectedOutputColumns: []
                         keys: _col0 (type: int), _col1 (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 25 Data size: 2999 Basic stats: 
COMPLETE Column stats: NONE
                         Spark HashTable Sink Operator
+                          Spark Hash Table Sink Vectorization:
+                              className: VectorSparkHashTableSinkOperator
+                              native: true
                           keys:
                             0 _col1 (type: int), _col4 (type: int)
                             1 _col0 (type: int), _col1 (type: int)
+            Execution mode: vectorized
             Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
+                groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
             Local Work:
               Map Reduce Local Work
 
@@ -301,14 +400,23 @@ STAGE PLANS:
                 TableScan
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15]
                   Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: SelectColumnIsNotNull(col 1) -> 
boolean
                     predicate: l_partkey is not null (type: boolean)
                     Statistics: Num rows: 100 Data size: 11999 Basic stats: 
COMPLETE Column stats: NONE
                     Group By Operator
                       Group By Vectorization:
-                          vectorOutput: false
+                          className: VectorGroupByOperator
+                          vectorOutput: true
+                          keyExpressions: col 1
                           native: false
-                          projectedOutputColumns: null
+                          projectedOutputColumns: []
                       keys: l_partkey (type: int)
                       mode: hash
                       outputColumnNames: _col0
@@ -317,11 +425,20 @@ STAGE PLANS:
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
+                        Reduce Sink Vectorization:
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, 
BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 100 Data size: 11999 Basic 
stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
             Map Vectorization:
-                enabled: false
-                enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
+                groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
         Reducer 2 
             Execution mode: vectorized
             Local Work:

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/structin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/structin.q.out b/ql/src/test/results/clientpositive/structin.q.out
index f67597a..252b4ac 100644
--- a/ql/src/test/results/clientpositive/structin.q.out
+++ b/ql/src/test/results/clientpositive/structin.q.out
@@ -57,6 +57,7 @@ STAGE PLANS:
                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
index da52b0a..b3359d3 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
@@ -275,10 +275,10 @@ Stage-3
         Stage-2
           Dependency Collection{}
             Stage-1
-              Map 1
-              File Output Operator [FS_2]
+              Map 1 vectorized
+              File Output Operator [FS_4]
                 table:{"name:":"default.src_autho_test"}
-                Select Operator [SEL_1] (rows=500 width=178)
+                Select Operator [SEL_3] (rows=500 width=178)
                   Output:["_col0","_col1"]
                  TableScan [TS_0] (rows=500 width=178)
                    default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
@@ -462,14 +462,14 @@ Stage-0
     limit:5
     Stage-1
       Reducer 2 vectorized
-      File Output Operator [FS_8]
-        Limit [LIM_7] (rows=5 width=178)
+      File Output Operator [FS_10]
+        Limit [LIM_9] (rows=5 width=178)
           Number of rows:5
-          Select Operator [SEL_6] (rows=500 width=178)
+          Select Operator [SEL_8] (rows=500 width=178)
             Output:["_col0","_col1"]
-          <-Map 1 [SIMPLE_EDGE]
-            SHUFFLE [RS_2]
-              Select Operator [SEL_1] (rows=500 width=178)
+          <-Map 1 [SIMPLE_EDGE] vectorized
+            SHUFFLE [RS_7]
+              Select Operator [SEL_6] (rows=500 width=178)
                 Output:["_col0","_col1"]
                TableScan [TS_0] (rows=500 width=178)
                  default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
@@ -697,6 +697,7 @@ STAGE PLANS:
                             sort order: 
                             Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary)
+            Execution mode: vectorized
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -732,6 +733,7 @@ STAGE PLANS:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/tez_join_hash.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez_join_hash.q.out b/ql/src/test/results/clientpositive/tez_join_hash.q.out
index c5e4757..c9b8169 100644
--- a/ql/src/test/results/clientpositive/tez_join_hash.q.out
+++ b/ql/src/test/results/clientpositive/tez_join_hash.q.out
@@ -89,6 +89,7 @@ STAGE PLANS:
               sort order: 
              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col0 (type: bigint)
+      Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -206,6 +207,7 @@ STAGE PLANS:
                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+      Execution mode: vectorized
       Local Work:
         Map Reduce Local Work
 
@@ -306,6 +308,7 @@ STAGE PLANS:
              Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
              Statistics: Num rows: 4620 Data size: 49082 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: bigint)
+      Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -394,6 +397,7 @@ STAGE PLANS:
                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+      Execution mode: vectorized
       Local Work:
         Map Reduce Local Work
 
@@ -518,6 +522,7 @@ STAGE PLANS:
                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+      Execution mode: vectorized
       Local Work:
         Map Reduce Local Work
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_bucket.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_bucket.q.out b/ql/src/test/results/clientpositive/vector_bucket.q.out
index 2825489..0eeb8a5 100644
--- a/ql/src/test/results/clientpositive/vector_bucket.q.out
+++ b/ql/src/test/results/clientpositive/vector_bucket.q.out
@@ -28,19 +28,36 @@ STAGE PLANS:
           TableScan
             alias: values__tmp__table__1
            Statistics: Num rows: 1 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1]
             Select Operator
              expressions: tmp_values_col1 (type: string), tmp_values_col2 (type: string)
               outputColumnNames: _col0, _col1
+              Select Vectorization:
+                  className: VectorSelectOperator
+                  native: true
+                  projectedOutputColumns: [0, 1]
              Statistics: Num rows: 1 Data size: 26 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 sort order: 
                 Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+                Reduce Sink Vectorization:
+                    className: VectorReduceSinkOperator
+                    native: false
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 1 Data size: 26 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col0 (type: string), _col1 (type: string)
+      Execution mode: vectorized
      Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/vector_cast_constant.q.out
index bf21732..b94b56e 100644
--- a/ql/src/test/results/clientpositive/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/vector_cast_constant.q.out
@@ -192,16 +192,29 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1, 2, 3]
             Reduce Output Operator
               key expressions: _col0 (type: int)
               sort order: +
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns 
IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS 
true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false, No TopN IS false
               Statistics: Num rows: 524 Data size: 155436 Basic stats: 
COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: double), _col2 (type: double), 
_col3 (type: decimal(14,4))
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_char_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_2.q.out b/ql/src/test/results/clientpositive/vector_char_2.q.out
index 60bd50f..d4e5225 100644
--- a/ql/src/test/results/clientpositive/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_2.q.out
@@ -147,16 +147,29 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1, 2]
             Reduce Output Operator
               key expressions: _col0 (type: char(20))
               sort order: +
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns 
IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS 
true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false, No TopN IS false
               Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE 
Column stats: NONE
               TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: bigint), _col2 (type: bigint)
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -325,16 +338,29 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1, 2]
             Reduce Output Operator
               key expressions: _col0 (type: char(20))
               sort order: -
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns 
IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS 
true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false, No TopN IS false
               Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE 
Column stats: NONE
               TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: bigint), _col2 (type: bigint)
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_decimal_round.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/vector_decimal_round.q.out
index 62461eb..aaa61cc 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_round.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_round.q.out
@@ -49,19 +49,37 @@ STAGE PLANS:
           TableScan
             alias: decimal_tbl_txt
             Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column 
stats: NONE
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0]
             Select Operator
               expressions: dec (type: decimal(10,0)), round(dec, -1) (type: 
decimal(11,0))
               outputColumnNames: _col0, _col1
+              Select Vectorization:
+                  className: VectorSelectOperator
+                  native: true
+                  projectedOutputColumns: [0, 1]
+                  selectExpressions: 
FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -1) -> 
1:decimal(11,0)
               Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE 
Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: decimal(10,0))
                 sort order: +
+                Reduce Sink Vectorization:
+                    className: VectorReduceSinkOperator
+                    native: false
+                    nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE 
Column stats: NONE
                 value expressions: _col1 (type: decimal(11,0))
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -115,19 +133,36 @@ STAGE PLANS:
           TableScan
             alias: decimal_tbl_txt
             Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column 
stats: NONE
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0]
             Select Operator
               expressions: dec (type: decimal(10,0))
               outputColumnNames: _col0
+              Select Vectorization:
+                  className: VectorSelectOperator
+                  native: true
+                  projectedOutputColumns: [0]
               Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE 
Column stats: NONE
               Reduce Output Operator
                 key expressions: round(_col0, -1) (type: decimal(11,0))
                 sort order: +
+                Reduce Sink Vectorization:
+                    className: VectorReduceSinkOperator
+                    native: false
+                    nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE 
Column stats: NONE
                 value expressions: _col0 (type: decimal(10,0))
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_groupby4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby4.q.out b/ql/src/test/results/clientpositive/vector_groupby4.q.out
index d22e9c1..799797d 100644
--- a/ql/src/test/results/clientpositive/vector_groupby4.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby4.q.out
@@ -99,15 +99,28 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0]
             Reduce Output Operator
               key expressions: _col0 (type: string)
               sort order: +
               Map-reduce partition columns: _col0 (type: string)
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
               Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE 
Column stats: NONE
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_groupby6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby6.q.out b/ql/src/test/results/clientpositive/vector_groupby6.q.out
index e323197..6fee467 100644
--- a/ql/src/test/results/clientpositive/vector_groupby6.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby6.q.out
@@ -99,15 +99,28 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0]
             Reduce Output Operator
               key expressions: _col0 (type: string)
               sort order: +
               Map-reduce partition columns: _col0 (type: string)
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
               Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE 
Column stats: NONE
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
index feada86..df1d435 100644
--- a/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
@@ -36,27 +36,46 @@ STAGE PLANS:
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1]
             Select Operator
               expressions: key (type: string)
               outputColumnNames: key
+              Select Vectorization:
+                  className: VectorSelectOperator
+                  native: true
+                  projectedOutputColumns: [0]
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
               Group By Operator
                 aggregations: count(), count(key)
                 Group By Vectorization:
-                    vectorOutput: false
+                    aggregators: VectorUDAFCountStar(*) -> bigint, 
VectorUDAFCount(col 0) -> bigint
+                    className: VectorGroupByOperator
+                    vectorOutput: true
                     native: false
-                    projectedOutputColumns: null
+                    projectedOutputColumns: [0, 1]
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE 
Column stats: NONE
                 Reduce Output Operator
                   sort order: 
+                  Reduce Sink Vectorization:
+                      className: VectorReduceSinkOperator
+                      native: false
+                      nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN 
[tez, spark] IS false
                   Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE 
Column stats: NONE
                   value expressions: _col0 (type: bigint), _col1 (type: bigint)
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -102,24 +121,40 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1]
             Map Join Operator
               condition map:
                    Inner Join 0 to 1
               keys:
                 0 
                 1 
+              Map Join Vectorization:
+                  className: VectorMapJoinOperator
+                  native: false
+                  nativeConditionsMet: hive.mapjoin.optimized.hashtable IS 
true, hive.vectorized.execution.mapjoin.native.enabled IS true, One MapJoin 
Condition IS true, No nullsafe IS true, Small table vectorizes IS true, 
Optimized Table and Supports Key Types IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
               outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 500 Data size: 13812 Basic stats: COMPLETE 
Column stats: NONE
               File Output Operator
                 compressed: false
+                File Sink Vectorization:
+                    className: VectorFileSinkOperator
+                    native: false
                 table:
                     input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                     serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Local Work:
         Map Reduce Local Work
 
@@ -144,31 +179,55 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1, 2, 3]
             Map Join Operator
               condition map:
                    Left Outer Join0 to 1
               keys:
                 0 _col0 (type: string)
                 1 _col0 (type: string)
+              Map Join Vectorization:
+                  className: VectorMapJoinOperator
+                  native: false
+                  nativeConditionsMet: hive.mapjoin.optimized.hashtable IS 
true, hive.vectorized.execution.mapjoin.native.enabled IS true, One MapJoin 
Condition IS true, No nullsafe IS true, Small table vectorizes IS true, 
Optimized Table and Supports Key Types IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
               outputColumnNames: _col0, _col1, _col2, _col3, _col5
               Statistics: Num rows: 550 Data size: 15193 Basic stats: COMPLETE 
Column stats: NONE
               Filter Operator
+                Filter Vectorization:
+                    className: VectorFilterOperator
+                    native: true
+                    predicateExpression: FilterExprOrExpr(children: 
FilterLongColEqualLongScalar(col 2, val 0) -> boolean, 
FilterExprAndExpr(children: SelectColumnIsNull(col 4) -> boolean, 
SelectColumnIsNotNull(col 0) -> boolean, 
FilterLongColGreaterEqualLongColumn(col 3, col 2) -> boolean) -> boolean) -> 
boolean
                 predicate: ((_col2 = 0) or (_col5 is null and _col0 is not 
null and (_col3 >= _col2))) (type: boolean)
                 Statistics: Num rows: 366 Data size: 10110 Basic stats: 
COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumns: [0, 1]
                   Statistics: Num rows: 366 Data size: 10110 Basic stats: 
COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
                     table:
                         input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                         serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Local Work:
         Map Reduce Local Work
 
@@ -176,15 +235,28 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1]
             Reduce Output Operator
               key expressions: _col0 (type: string)
               sort order: +
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
               Statistics: Num rows: 366 Data size: 10110 Basic stats: COMPLETE 
Column stats: NONE
               value expressions: _col1 (type: string)
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -248,15 +320,24 @@ STAGE PLANS:
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1]
             Select Operator
               expressions: key (type: string)
               outputColumnNames: key
+              Select Vectorization:
+                  className: VectorSelectOperator
+                  native: true
+                  projectedOutputColumns: [0]
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
               Group By Operator
                 Group By Vectorization:
-                    vectorOutput: false
+                    className: VectorGroupByOperator
+                    vectorOutput: true
+                    keyExpressions: col 0
                     native: false
-                    projectedOutputColumns: null
+                    projectedOutputColumns: []
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0
@@ -265,11 +346,21 @@ STAGE PLANS:
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
+                  Reduce Sink Vectorization:
+                      className: VectorReduceSinkOperator
+                      native: false
+                      nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN 
[tez, spark] IS false
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -318,3 +409,49 @@ order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
+PREHOOK: query: CREATE TABLE orcsrc STORED AS ORC AS SELECT * FROM src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcsrc
+POSTHOOK: query: CREATE TABLE orcsrc STORED AS ORC AS SELECT * FROM src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcsrc
+POSTHOOK: Lineage: orcsrc.key SIMPLE [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
+POSTHOOK: Lineage: orcsrc.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
+Warning: Map Join MAPJOIN[34][bigTable=?] in task 'Stage-8:MAPRED' is a cross 
product
+PREHOOK: query: select *
+from orcsrc
+where not key in
+(select key from orcsrc)
+order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcsrc
+#### A masked pattern was here ####
+POSTHOOK: query: select *
+from orcsrc
+where not key in
+(select key from orcsrc)
+order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcsrc
+#### A masked pattern was here ####
+Warning: Map Join MAPJOIN[34][bigTable=?] in task 'Stage-8:MAPRED' is a cross 
product
+PREHOOK: query: select *
+from orcsrc
+where not key in
+(select key from orcsrc)
+order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcsrc
+#### A masked pattern was here ####
+POSTHOOK: query: select *
+from orcsrc
+where not key in
+(select key from orcsrc)
+order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcsrc
+#### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
index 56d3e46..4cfe3c4 100644
--- a/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
@@ -473,14 +473,27 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0]
             Reduce Output Operator
               key expressions: _col0 (type: int)
               sort order: +
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
               Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE 
Column stats: NONE
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -736,15 +749,28 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1, 2]
             Reduce Output Operator
               key expressions: _col0 (type: int)
               sort order: +
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
               Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE 
Column stats: NONE
               value expressions: _col1 (type: bigint), _col2 (type: bigint)
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -1000,15 +1026,28 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1, 2]
             Reduce Output Operator
               key expressions: _col0 (type: int), _col1 (type: int)
               sort order: ++
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
               Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE 
Column stats: NONE
               value expressions: _col2 (type: bigint)
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out 
b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
index a2f59d5..3e96d10 100644
--- a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
@@ -27,14 +27,23 @@ STAGE PLANS:
           TableScan
             alias: lineitem
             Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE 
Column stats: NONE
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15]
             Filter Operator
+              Filter Vectorization:
+                  className: VectorFilterOperator
+                  native: true
+                  predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
               predicate: l_partkey is not null (type: boolean)
               Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE 
Column stats: NONE
               Group By Operator
                 Group By Vectorization:
-                    vectorOutput: false
+                    className: VectorGroupByOperator
+                    vectorOutput: true
+                    keyExpressions: col 1
                     native: false
-                    projectedOutputColumns: null
+                    projectedOutputColumns: []
                 keys: l_partkey (type: int)
                 mode: hash
                 outputColumnNames: _col0
@@ -43,11 +52,21 @@ STAGE PLANS:
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
+                  Reduce Sink Vectorization:
+                      className: VectorReduceSinkOperator
+                      native: false
+                      nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN 
[tez, spark] IS false
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: 
COMPLETE Column stats: NONE
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -123,12 +142,20 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0]
             Map Join Operator
               condition map:
                    Inner Join 0 to 1
               keys:
                 0 _col0 (type: int)
                 1 _col1 (type: int)
+              Map Join Vectorization:
+                  className: VectorMapJoinOperator
+                  native: false
+                  nativeConditionsMet: hive.mapjoin.optimized.hashtable IS 
true, hive.vectorized.execution.mapjoin.native.enabled IS true, One MapJoin 
Condition IS true, No nullsafe IS true, Small table vectorizes IS true, 
Optimized Table and Supports Key Types IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
               outputColumnNames: _col0, _col1, _col3
               Statistics: Num rows: 55 Data size: 6598 Basic stats: COMPLETE 
Column stats: NONE
               Map Join Operator
@@ -137,23 +164,40 @@ STAGE PLANS:
                 keys:
                   0 _col1 (type: int)
                   1 _col0 (type: int)
+                Map Join Vectorization:
+                    className: VectorMapJoinOperator
+                    native: false
+                    nativeConditionsMet: hive.mapjoin.optimized.hashtable IS 
true, hive.vectorized.execution.mapjoin.native.enabled IS true, One MapJoin 
Condition IS true, No nullsafe IS true, Small table vectorizes IS true, 
Optimized Table and Supports Key Types IS true
+                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
                 outputColumnNames: _col0, _col3
                 Statistics: Num rows: 60 Data size: 7257 Basic stats: COMPLETE 
Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), _col3 (type: int)
                   outputColumnNames: _col0, _col1
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumns: [0, 1]
                   Statistics: Num rows: 60 Data size: 7257 Basic stats: 
COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
                     Statistics: Num rows: 60 Data size: 7257 Basic stats: 
COMPLETE Column stats: NONE
                     table:
                         input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                         serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Local Work:
         Map Reduce Local Work
 
@@ -216,14 +260,23 @@ STAGE PLANS:
           TableScan
             alias: lineitem
             Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE 
Column stats: NONE
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15]
             Filter Operator
+              Filter Vectorization:
+                  className: VectorFilterOperator
+                  native: true
+                  predicateExpression: SelectColumnIsNotNull(col 1) -> boolean
               predicate: l_partkey is not null (type: boolean)
               Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE 
Column stats: NONE
               Group By Operator
                 Group By Vectorization:
-                    vectorOutput: false
+                    className: VectorGroupByOperator
+                    vectorOutput: true
+                    keyExpressions: col 1
                     native: false
-                    projectedOutputColumns: null
+                    projectedOutputColumns: []
                 keys: l_partkey (type: int)
                 mode: hash
                 outputColumnNames: _col0
@@ -232,11 +285,21 @@ STAGE PLANS:
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
+                  Reduce Sink Vectorization:
+                      className: VectorReduceSinkOperator
+                      native: false
+                      nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN 
[tez, spark] IS false
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: 
COMPLETE Column stats: NONE
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -312,12 +375,20 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0]
             Map Join Operator
               condition map:
                    Inner Join 0 to 1
               keys:
                 0 _col0 (type: int)
                 1 _col1 (type: int)
+              Map Join Vectorization:
+                  className: VectorMapJoinOperator
+                  native: false
+                  nativeConditionsMet: hive.mapjoin.optimized.hashtable IS 
true, hive.vectorized.execution.mapjoin.native.enabled IS true, One MapJoin 
Condition IS true, No nullsafe IS true, Small table vectorizes IS true, 
Optimized Table and Supports Key Types IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
               outputColumnNames: _col0, _col1, _col3, _col4
               Statistics: Num rows: 55 Data size: 6598 Basic stats: COMPLETE 
Column stats: NONE
               Map Join Operator
@@ -326,23 +397,40 @@ STAGE PLANS:
                 keys:
                   0 _col1 (type: int), _col4 (type: int)
                   1 _col0 (type: int), _col1 (type: int)
+                Map Join Vectorization:
+                    className: VectorMapJoinOperator
+                    native: false
+                    nativeConditionsMet: hive.mapjoin.optimized.hashtable IS 
true, hive.vectorized.execution.mapjoin.native.enabled IS true, One MapJoin 
Condition IS true, No nullsafe IS true, Small table vectorizes IS true, 
Optimized Table and Supports Key Types IS true
+                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
                 outputColumnNames: _col0, _col3
                 Statistics: Num rows: 60 Data size: 7257 Basic stats: COMPLETE 
Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), _col3 (type: int)
                   outputColumnNames: _col0, _col1
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumns: [0, 1]
                   Statistics: Num rows: 60 Data size: 7257 Basic stats: 
COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
                     Statistics: Num rows: 60 Data size: 7257 Basic stats: 
COMPLETE Column stats: NONE
                     table:
                         input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                         serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Local Work:
         Map Reduce Local Work
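
[Editorial note, not part of the commit] On the recurring "nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false" lines in this file: under plain MapReduce the reduce-sink and map-join operators vectorize only through the non-native VectorReduceSinkOperator / VectorMapJoinOperator paths. A hypothetical session that would satisfy that engine check (again, not part of this change) looks like:

  -- assumed session settings; property names are those printed in the plans above
  set hive.execution.engine=tez;                              -- or spark
  set hive.vectorized.execution.reducesink.new.enabled=true;  -- reported 'IS true' above
  set hive.vectorized.execution.mapjoin.native.enabled=true;  -- reported 'IS true' above
  set hive.mapjoin.optimized.hashtable=true;                  -- reported 'IS true' above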
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
----------------------------------------------------------------------
diff --git 
a/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out 
b/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
index b6cdce1..0fa8e2f 100644
--- a/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
+++ b/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
@@ -371,10 +371,15 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column 
stats: NONE
               TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -403,10 +408,15 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column 
stats: NONE
               TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_orderby_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_orderby_5.q.out 
b/ql/src/test/results/clientpositive/vector_orderby_5.q.out
index e156e08..b02e4ee 100644
--- a/ql/src/test/results/clientpositive/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/vector_orderby_5.q.out
@@ -193,15 +193,28 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1]
             Reduce Output Operator
               key expressions: _col0 (type: boolean)
               sort order: -
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
               Statistics: Num rows: 1000 Data size: 459356 Basic stats: 
COMPLETE Column stats: NONE
               value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git 
a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out 
b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
index 0f0548b..73aa28b 100644
--- a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
@@ -114,16 +114,29 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1, 2, 3, 4]
             Reduce Output Operator
               key expressions: _col0 (type: int), _col1 (type: double), _col2 
(type: decimal(20,10)), _col3 (type: decimal(23,14))
               sort order: ++++
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns 
IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS 
true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false, No TopN IS false
               Statistics: Num rows: 3051 Data size: 720036 Basic stats: 
COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
               value expressions: _col4 (type: decimal(20,10))
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_string_concat.q.out 
b/ql/src/test/results/clientpositive/vector_string_concat.q.out
index a121785..067bbaa 100644
--- a/ql/src/test/results/clientpositive/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/vector_string_concat.q.out
@@ -399,15 +399,28 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0]
             Reduce Output Operator
               key expressions: _col0 (type: string)
               sort order: +
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns 
IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS 
true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false, No TopN IS false
               Statistics: Num rows: 1000 Data size: 459356 Basic stats: 
COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out 
b/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
index ad7af88..38f13da 100644
--- a/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
+++ b/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
@@ -273,14 +273,27 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0]
             Reduce Output Operator
               sort order: 
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No 
DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe 
for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false
               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
               value expressions: _col0 (type: bigint)
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_udf_character_length.q.out
----------------------------------------------------------------------
diff --git 
a/ql/src/test/results/clientpositive/vector_udf_character_length.q.out 
b/ql/src/test/results/clientpositive/vector_udf_character_length.q.out
index 558bfc8..81d801c 100644
--- a/ql/src/test/results/clientpositive/vector_udf_character_length.q.out
+++ b/ql/src/test/results/clientpositive/vector_udf_character_length.q.out
@@ -71,6 +71,7 @@ STAGE PLANS:
                     output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.dest1
+      Execution mode: vectorized
 
   Stage: Stage-7
     Conditional Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out 
b/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out
index 37a6786..c71cfef 100644
--- a/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out
+++ b/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out
@@ -54,6 +54,7 @@ STAGE PLANS:
                     output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.dest1
+      Execution mode: vectorized
 
   Stage: Stage-7
     Conditional Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ce695b5d/ql/src/test/results/clientpositive/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_13.q.out 
b/ql/src/test/results/clientpositive/vectorization_13.q.out
index cb57133..bd84eaf 100644
--- a/ql/src/test/results/clientpositive/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_13.q.out
@@ -157,15 +157,28 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15, 16, 17, 18, 19, 20]
             Reduce Output Operator
               key expressions: _col0 (type: boolean), _col1 (type: tinyint), 
_col2 (type: timestamp), _col3 (type: float), _col4 (type: string), _col5 
(type: tinyint), _col6 (type: tinyint), _col7 (type: tinyint), _col8 (type: 
double), _col9 (type: double), _col10 (type: double), _col11 (type: float), 
_col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 
(type: decimal(7,3)), _col16 (type: double), _col17 (type: double), _col18 
(type: float), _col19 (type: double), _col20 (type: tinyint)
               sort order: +++++++++++++++++++++
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns 
IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS 
true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false, No TopN IS false
               Statistics: Num rows: 1365 Data size: 293479 Basic stats: 
COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -459,15 +472,28 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            TableScan Vectorization:
+                native: true
+                projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15, 16, 17, 18, 19, 20]
             Reduce Output Operator
               key expressions: _col0 (type: boolean), _col1 (type: tinyint), 
_col2 (type: timestamp), _col3 (type: float), _col4 (type: string), _col5 
(type: tinyint), _col6 (type: tinyint), _col7 (type: tinyint), _col8 (type: 
double), _col9 (type: double), _col10 (type: double), _col11 (type: float), 
_col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 
(type: decimal(7,3)), _col16 (type: double), _col17 (type: double), _col18 
(type: float), _col19 (type: double), _col20 (type: tinyint)
               sort order: +++++++++++++++++++++
+              Reduce Sink Vectorization:
+                  className: VectorReduceSinkOperator
+                  native: false
+                  nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns 
IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS 
true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, 
spark] IS false, No TopN IS false
               Statistics: Num rows: 1365 Data size: 293479 Basic stats: 
COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
+      Execution mode: vectorized
       Map Vectorization:
-          enabled: false
-          enabledConditionsNotMet: 
hive.vectorized.use.vector.serde.deserialize IS false
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize 
IS true
+          groupByVectorOutput: true
           inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
