HIVE-18067 : Remove extraneous golden files

Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/afa9ffee
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/afa9ffee
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/afa9ffee

Branch: refs/heads/master
Commit: afa9ffee8ae308c39ece2fa76897a304acd1c1ca
Parents: de78ddb
Author: Ashutosh Chauhan <hashut...@apache.org>
Authored: Tue Nov 14 21:51:38 2017 -0800
Committer: Ashutosh Chauhan <hashut...@apache.org>
Committed: Tue Nov 14 21:51:38 2017 -0800

----------------------------------------------------------------------
 .../clientpositive/insert_column_mixcase.q.out  |    18 -
 .../clientpositive/llap/vector_decimal_6b.q.out |  1099 --
 .../llap/vectorized_distinct_gby.q.out          |   344 -
 .../test/results/clientpositive/mm_exim.q.out   |   557 -
 .../spark/stats_partscan_1_23.q.out             |   188 -
 .../clientpositive/stats_partscan_1_23.q.out    |   191 -
 .../clientpositive/vector_windowing.q.out       |  9276 -------------
 .../vector_windowing_expressions.q.out          |  2077 ---
 .../clientpositive/vector_windowing_gby.q.out   |   245 -
 .../clientpositive/vector_windowing_gby2.q.out  |  1007 --
 .../vector_windowing_multipartitioning.q.out    | 11320 ---------------
 .../vector_windowing_order_null.q.out           |  1013 --
 .../vector_windowing_range_multiorder.q.out     | 12270 -----------------
 .../clientpositive/vector_windowing_rank.q.out  |  1564 ---
 .../vector_windowing_streaming.q.out            |   849 --
 .../vector_windowing_windowspec.q.out           |  2093 ---
 .../vector_windowing_windowspec4.q.out          |   214 -
 .../vectorization_input_format_excludes.q.out   |  1340 --
 18 files changed, 45665 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/afa9ffee/ql/src/test/results/clientpositive/insert_column_mixcase.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_column_mixcase.q.out b/ql/src/test/results/clientpositive/insert_column_mixcase.q.out
deleted file mode 100644
index 9395e89..0000000
--- a/ql/src/test/results/clientpositive/insert_column_mixcase.q.out
+++ /dev/null
@@ -1,18 +0,0 @@
-PREHOOK: query: DROP TABLE IF EXISTS insert_camel_case
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS insert_camel_case
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE insert_camel_case (key int, value string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@insert_camel_case
-POSTHOOK: query: CREATE TABLE insert_camel_case (key int, value string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@insert_camel_case
-PREHOOK: query: INSERT INTO insert_camel_case(KeY, VALuE) SELECT * FROM src LIMIT 100
-PREHOOK: type: QUERY
-PREHOOK: Output: default@insert_camel_case
-POSTHOOK: query: INSERT INTO insert_camel_case(KeY, VALuE) SELECT * FROM src LIMIT 100
-POSTHOOK: type: QUERY
-POSTHOOK: Output: default@insert_camel_case

http://git-wip-us.apache.org/repos/asf/hive/blob/afa9ffee/ql/src/test/results/clientpositive/llap/vector_decimal_6b.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_6b.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_6b.q.out
deleted file mode 100644
index 0327689..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_6b.q.out
+++ /dev/null
@@ -1,1099 +0,0 @@
-PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1_txt
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1_txt
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2_txt
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2_txt
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_2
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3_txt
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3_txt
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_6_3
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE DECIMAL_6_1_txt(key decimal(10,5), value int)
-ROW FORMAT DELIMITED
-   FIELDS TERMINATED BY ' '
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@DECIMAL_6_1_txt
-POSTHOOK: query: CREATE TABLE DECIMAL_6_1_txt(key decimal(10,5), value int)
-ROW FORMAT DELIMITED
-   FIELDS TERMINATED BY ' '
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DECIMAL_6_1_txt
-PREHOOK: query: CREATE TABLE DECIMAL_6_2_txt(key decimal(17,5), value int)
-ROW FORMAT DELIMITED
-   FIELDS TERMINATED BY ' '
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@DECIMAL_6_2_txt
-POSTHOOK: query: CREATE TABLE DECIMAL_6_2_txt(key decimal(17,5), value int)
-ROW FORMAT DELIMITED
-   FIELDS TERMINATED BY ' '
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DECIMAL_6_2_txt
-PREHOOK: query: CREATE TABLE DECIMAL_6_3_txt(key decimal(10,5), value int, key_big decimal(20,5))
-ROW FORMAT DELIMITED
-   FIELDS TERMINATED BY ' '
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@DECIMAL_6_3_txt
-POSTHOOK: query: CREATE TABLE DECIMAL_6_3_txt(key decimal(10,5), value int, key_big decimal(20,5))
-ROW FORMAT DELIMITED
-   FIELDS TERMINATED BY ' '
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DECIMAL_6_3_txt
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_1_txt
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@decimal_6_1_txt
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_1_txt
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@decimal_6_1_txt
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_2_txt
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@decimal_6_2_txt
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_2_txt
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@decimal_6_2_txt
-PREHOOK: query: INSERT INTO DECIMAL_6_3_txt SELECT key, value, key FROM DECIMAL_6_1_txt
-PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_6_1_txt
-PREHOOK: Output: default@decimal_6_3_txt
-POSTHOOK: query: INSERT INTO DECIMAL_6_3_txt SELECT key, value, key FROM DECIMAL_6_1_txt
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_6_1_txt
-POSTHOOK: Output: default@decimal_6_3_txt
-POSTHOOK: Lineage: decimal_6_3_txt.key SIMPLE [(decimal_6_1_txt)decimal_6_1_txt.FieldSchema(name:key, type:decimal(10,5), comment:null), ]
-POSTHOOK: Lineage: decimal_6_3_txt.key_big EXPRESSION [(decimal_6_1_txt)decimal_6_1_txt.FieldSchema(name:key, type:decimal(10,5), comment:null), ]
-POSTHOOK: Lineage: decimal_6_3_txt.value SIMPLE [(decimal_6_1_txt)decimal_6_1_txt.FieldSchema(name:value, type:int, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT * FROM DECIMAL_6_1_txt where key < 200BD ORDER BY key, value
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT * FROM DECIMAL_6_1_txt where key < 200BD ORDER BY key, value
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_6_1_txt
-                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedColumnNums: [0, 1]
-                      projectedColumns: [key:decimal(10,5), value:int]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterDecimalColLessDecimalScalar(col 0:decimal(10,5), val 200)
-                    predicate: (key < 200) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: decimal(10,5)), value (type: int)
-                      outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [0, 1]
-                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: decimal(10,5)), _col1 (type: int)
-                        sort order: ++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            keyColumnNums: [0, 1]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: []
-                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-            Execution mode: vectorized, llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                vectorizationSupportRemovedReasons: [DECIMAL_64 removed because LLAP is enabled]
-                vectorizationSupport: []
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: key:decimal(10,5), value:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: KEY.reducesinkkey0:decimal(10,5), KEY.reducesinkkey1:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: decimal(10,5)), KEY.reducesinkkey1 (type: int)
-                outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1]
-                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT * FROM DECIMAL_6_1_txt where key < 200BD ORDER BY key, value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_6_1_txt
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_6_1_txt where key < 200BD ORDER BY key, value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_6_1_txt
-#### A masked pattern was here ####
--4400.00000    4400
--1255.49000    -1255
--1.12200       -11
--1.12000       -1
--0.33300       0
--0.30000       0
-0.00000        0
-0.00000        0
-0.33300        0
-1.00000        1
-1.00000        1
-1.12000        1
-1.12200        1
-2.00000        2
-3.14000        3
-3.14000        3
-3.14000        4
-10.00000       10
-10.73433       5
-124.00000      124
-125.20000      125
-PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT * FROM DECIMAL_6_1_txt where key - 100BD < 200BD ORDER BY key, value
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT * FROM DECIMAL_6_1_txt where key - 100BD < 200BD ORDER BY key, value
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_6_1_txt
-                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedColumnNums: [0, 1]
-                      projectedColumns: [key:decimal(10,5), value:int]
-                  Filter Operator
-                    Filter Vectorization:
-                        className: VectorFilterOperator
-                        native: true
-                        predicateExpression: FilterDecimalColLessDecimalScalar(col 2:decimal(11,5), val 200)(children: DecimalColSubtractDecimalScalar(col 0:decimal(10,5), val 100) -> 2:decimal(11,5))
-                    predicate: ((key - 100) < 200) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: decimal(10,5)), value (type: int)
-                      outputColumnNames: _col0, _col1
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumnNums: [0, 1]
-                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: decimal(10,5)), _col1 (type: int)
-                        sort order: ++
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkObjectHashOperator
-                            keyColumnNums: [0, 1]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: []
-                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-            Execution mode: vectorized, llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                vectorizationSupportRemovedReasons: [DECIMAL_64 removed because LLAP is enabled]
-                vectorizationSupport: []
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: key:decimal(10,5), value:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: [decimal(11,5)]
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: KEY.reducesinkkey0:decimal(10,5), KEY.reducesinkkey1:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: decimal(10,5)), KEY.reducesinkkey1 (type: int)
-                outputColumnNames: _col0, _col1
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1]
-                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT * FROM DECIMAL_6_1_txt where key - 100BD < 200BD ORDER BY key, value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_6_1_txt
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM DECIMAL_6_1_txt where key - 100BD < 200BD ORDER BY key, value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_6_1_txt
-#### A masked pattern was here ####
--4400.00000    4400
--1255.49000    -1255
--1.12200       -11
--1.12000       -1
--0.33300       0
--0.30000       0
-0.00000        0
-0.00000        0
-0.33300        0
-1.00000        1
-1.00000        1
-1.12000        1
-1.12200        1
-2.00000        2
-3.14000        3
-3.14000        3
-3.14000        4
-10.00000       10
-10.73433       5
-124.00000      124
-125.20000      125
-PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT key, value, key - 100BD FROM DECIMAL_6_1_txt ORDER BY key, value
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT key, value, key - 100BD FROM DECIMAL_6_1_txt ORDER BY key, value
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_6_1_txt
-                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedColumnNums: [0, 1]
-                      projectedColumns: [key:decimal(10,5), value:int]
-                  Select Operator
-                    expressions: key (type: decimal(10,5)), value (type: int), (key - 100) (type: decimal(11,5))
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [0, 1, 2]
-                        selectExpressions: DecimalColSubtractDecimalScalar(col 0:decimal(10,5), val 100) -> 2:decimal(11,5)
-                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: decimal(10,5)), _col1 (type: int)
-                      sort order: ++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [0, 1]
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          valueColumnNums: [2]
-                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col2 (type: decimal(11,5))
-            Execution mode: vectorized, llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                vectorizationSupportRemovedReasons: [DECIMAL_64 removed because LLAP is enabled]
-                vectorizationSupport: []
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0, 1]
-                    dataColumns: key:decimal(10,5), value:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: [decimal(11,5)]
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 3
-                    dataColumns: KEY.reducesinkkey0:decimal(10,5), KEY.reducesinkkey1:int, VALUE._col0:decimal(11,5)
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: decimal(10,5)), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: decimal(11,5))
-                outputColumnNames: _col0, _col1, _col2
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1, 2]
-                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT key, value, key - 100BD FROM DECIMAL_6_1_txt ORDER BY key, value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_6_1_txt
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, key - 100BD FROM DECIMAL_6_1_txt ORDER BY key, value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_6_1_txt
-#### A masked pattern was here ####
-NULL   -1234567890     NULL
-NULL   0       NULL
-NULL   3       NULL
-NULL   4       NULL
-NULL   1234567890      NULL
--4400.00000    4400    -4500.00000
--1255.49000    -1255   -1355.49000
--1.12200       -11     -101.12200
--1.12000       -1      -101.12000
--0.33300       0       -100.33300
--0.30000       0       -100.30000
-0.00000        0       -100.00000
-0.00000        0       -100.00000
-0.33300        0       -99.66700
-1.00000        1       -99.00000
-1.00000        1       -99.00000
-1.12000        1       -98.88000
-1.12200        1       -98.87800
-2.00000        2       -98.00000
-3.14000        3       -96.86000
-3.14000        3       -96.86000
-3.14000        4       -96.86000
-10.00000       10      -90.00000
-10.73433       5       -89.26567
-124.00000      124     24.00000
-125.20000      125     25.20000
-23232.23435    2       23132.23435
-PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT key, value, key - 100BD, key_big FROM DECIMAL_6_3_txt ORDER BY key, value
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT key, value, key - 100BD, key_big FROM DECIMAL_6_3_txt ORDER BY key, value
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_6_3_txt
-                  Statistics: Num rows: 27 Data size: 6156 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedColumnNums: [0, 1, 2]
-                      projectedColumns: [key:decimal(10,5), value:int, key_big:decimal(20,5)]
-                  Select Operator
-                    expressions: key (type: decimal(10,5)), value (type: int), (key - 100) (type: decimal(11,5)), key_big (type: decimal(20,5))
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [0, 1, 3, 2]
-                        selectExpressions: DecimalColSubtractDecimalScalar(col 0:decimal(10,5), val 100) -> 3:decimal(11,5)
-                    Statistics: Num rows: 27 Data size: 6156 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: decimal(10,5)), _col1 (type: int)
-                      sort order: ++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [0, 1]
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          valueColumnNums: [3, 2]
-                      Statistics: Num rows: 27 Data size: 6156 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col2 (type: decimal(11,5)), _col3 (type: decimal(20,5))
-            Execution mode: vectorized, llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                vectorizationSupportRemovedReasons: [DECIMAL_64 removed because LLAP is enabled]
-                vectorizationSupport: []
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 3
-                    includeColumns: [0, 1, 2]
-                    dataColumns: key:decimal(10,5), value:int, key_big:decimal(20,5)
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: [decimal(11,5)]
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 4
-                    dataColumns: KEY.reducesinkkey0:decimal(10,5), KEY.reducesinkkey1:int, VALUE._col0:decimal(11,5), VALUE._col1:decimal(20,5)
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: decimal(10,5)), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: decimal(11,5)), VALUE._col1 (type: decimal(20,5))
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1, 2, 3]
-                Statistics: Num rows: 27 Data size: 6156 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 27 Data size: 6156 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT key, value, key - 100BD, key_big FROM DECIMAL_6_3_txt ORDER BY key, value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_6_3_txt
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, key - 100BD, key_big FROM DECIMAL_6_3_txt ORDER BY key, value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_6_3_txt
-#### A masked pattern was here ####
-NULL   -1234567890     NULL    NULL
-NULL   0       NULL    NULL
-NULL   3       NULL    NULL
-NULL   4       NULL    NULL
-NULL   1234567890      NULL    NULL
--4400.00000    4400    -4500.00000     -4400.00000
--1255.49000    -1255   -1355.49000     -1255.49000
--1.12200       -11     -101.12200      -1.12200
--1.12000       -1      -101.12000      -1.12000
--0.33300       0       -100.33300      -0.33300
--0.30000       0       -100.30000      -0.30000
-0.00000        0       -100.00000      0.00000
-0.00000        0       -100.00000      0.00000
-0.33300        0       -99.66700       0.33300
-1.00000        1       -99.00000       1.00000
-1.00000        1       -99.00000       1.00000
-1.12000        1       -98.88000       1.12000
-1.12200        1       -98.87800       1.12200
-2.00000        2       -98.00000       2.00000
-3.14000        3       -96.86000       3.14000
-3.14000        3       -96.86000       3.14000
-3.14000        4       -96.86000       3.14000
-10.00000       10      -90.00000       10.00000
-10.73433       5       -89.26567       10.73433
-124.00000      124     24.00000        124.00000
-125.20000      125     25.20000        125.20000
-23232.23435    2       23132.23435     23232.23435
-PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT key, value, key - 100BD, key_big, key_big - key FROM DECIMAL_6_3_txt ORDER BY key, value
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT key, value, key - 100BD, key_big, key_big - key FROM DECIMAL_6_3_txt ORDER BY key, value
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_6_3_txt
-                  Statistics: Num rows: 27 Data size: 6156 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedColumnNums: [0, 1, 2]
-                      projectedColumns: [key:decimal(10,5), value:int, key_big:decimal(20,5)]
-                  Select Operator
-                    expressions: key (type: decimal(10,5)), value (type: int), (key - 100) (type: decimal(11,5)), key_big (type: decimal(20,5)), (key_big - key) (type: decimal(21,5))
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [0, 1, 3, 2, 4]
-                        selectExpressions: DecimalColSubtractDecimalScalar(col 0:decimal(10,5), val 100) -> 3:decimal(11,5), DecimalColSubtractDecimalColumn(col 2:decimal(20,5), col 0:decimal(10,5)) -> 4:decimal(21,5)
-                    Statistics: Num rows: 27 Data size: 6156 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: decimal(10,5)), _col1 (type: int)
-                      sort order: ++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [0, 1]
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          valueColumnNums: [3, 2, 4]
-                      Statistics: Num rows: 27 Data size: 6156 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col2 (type: decimal(11,5)), _col3 (type: decimal(20,5)), _col4 (type: decimal(21,5))
-            Execution mode: vectorized, llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                vectorizationSupportRemovedReasons: [DECIMAL_64 removed because LLAP is enabled]
-                vectorizationSupport: []
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 3
-                    includeColumns: [0, 1, 2]
-                    dataColumns: key:decimal(10,5), value:int, key_big:decimal(20,5)
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: [decimal(11,5), decimal(21,5)]
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    dataColumns: KEY.reducesinkkey0:decimal(10,5), KEY.reducesinkkey1:int, VALUE._col0:decimal(11,5), VALUE._col1:decimal(20,5), VALUE._col2:decimal(21,5)
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: decimal(10,5)), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: decimal(11,5)), VALUE._col1 (type: decimal(20,5)), VALUE._col2 (type: decimal(21,5))
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1, 2, 3, 4]
-                Statistics: Num rows: 27 Data size: 6156 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 27 Data size: 6156 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT key, value, key - 100BD, key_big, key_big - key FROM DECIMAL_6_3_txt ORDER BY key, value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_6_3_txt
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, key - 100BD, key_big, key_big - key FROM DECIMAL_6_3_txt ORDER BY key, value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_6_3_txt
-#### A masked pattern was here ####
-NULL   -1234567890     NULL    NULL    NULL
-NULL   0       NULL    NULL    NULL
-NULL   3       NULL    NULL    NULL
-NULL   4       NULL    NULL    NULL
-NULL   1234567890      NULL    NULL    NULL
--4400.00000    4400    -4500.00000     -4400.00000     0.00000
--1255.49000    -1255   -1355.49000     -1255.49000     0.00000
--1.12200       -11     -101.12200      -1.12200        0.00000
--1.12000       -1      -101.12000      -1.12000        0.00000
--0.33300       0       -100.33300      -0.33300        0.00000
--0.30000       0       -100.30000      -0.30000        0.00000
-0.00000        0       -100.00000      0.00000 0.00000
-0.00000        0       -100.00000      0.00000 0.00000
-0.33300        0       -99.66700       0.33300 0.00000
-1.00000        1       -99.00000       1.00000 0.00000
-1.00000        1       -99.00000       1.00000 0.00000
-1.12000        1       -98.88000       1.12000 0.00000
-1.12200        1       -98.87800       1.12200 0.00000
-2.00000        2       -98.00000       2.00000 0.00000
-3.14000        3       -96.86000       3.14000 0.00000
-3.14000        3       -96.86000       3.14000 0.00000
-3.14000        4       -96.86000       3.14000 0.00000
-10.00000       10      -90.00000       10.00000        0.00000
-10.73433       5       -89.26567       10.73433        0.00000
-124.00000      124     24.00000        124.00000       0.00000
-125.20000      125     25.20000        125.20000       0.00000
-23232.23435    2       23132.23435     23232.23435     0.00000
-PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT key, value, cast(key as decimal(20,4)) FROM DECIMAL_6_3_txt ORDER BY key, value
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT key, value, cast(key as decimal(20,4)) FROM DECIMAL_6_3_txt ORDER BY key, value
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_6_3_txt
-                  Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedColumnNums: [0, 1, 2]
-                      projectedColumns: [key:decimal(10,5), value:int, key_big:decimal(20,5)]
-                  Select Operator
-                    expressions: key (type: decimal(10,5)), value (type: int), CAST( key AS decimal(20,4)) (type: decimal(20,4))
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [0, 1, 3]
-                        selectExpressions: CastDecimalToDecimal(col 0:decimal(10,5)) -> 3:decimal(20,4)
-                    Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: decimal(10,5)), _col1 (type: int)
-                      sort order: ++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [0, 1]
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          valueColumnNums: [3]
-                      Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col2 (type: decimal(20,4))
-            Execution mode: vectorized, llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                vectorizationSupportRemovedReasons: [DECIMAL_64 removed because LLAP is enabled]
-                vectorizationSupport: []
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 3
-                    includeColumns: [0, 1]
-                    dataColumns: key:decimal(10,5), value:int, key_big:decimal(20,5)
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: [decimal(20,4)]
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 3
-                    dataColumns: KEY.reducesinkkey0:decimal(10,5), KEY.reducesinkkey1:int, VALUE._col0:decimal(20,4)
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: decimal(10,5)), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: decimal(20,4))
-                outputColumnNames: _col0, _col1, _col2
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1, 2]
-                Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT key, value, cast(key as decimal(20,4)) FROM DECIMAL_6_3_txt ORDER BY key, value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_6_3_txt
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, cast(key as decimal(20,4)) FROM DECIMAL_6_3_txt ORDER BY key, value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_6_3_txt
-#### A masked pattern was here ####
-NULL   -1234567890     NULL
-NULL   0       NULL
-NULL   3       NULL
-NULL   4       NULL
-NULL   1234567890      NULL
--4400.00000    4400    -4400.0000
--1255.49000    -1255   -1255.4900
--1.12200       -11     -1.1220
--1.12000       -1      -1.1200
--0.33300       0       -0.3330
--0.30000       0       -0.3000
-0.00000        0       0.0000
-0.00000        0       0.0000
-0.33300        0       0.3330
-1.00000        1       1.0000
-1.00000        1       1.0000
-1.12000        1       1.1200
-1.12200        1       1.1220
-2.00000        2       2.0000
-3.14000        3       3.1400
-3.14000        3       3.1400
-3.14000        4       3.1400
-10.00000       10      10.0000
-10.73433       5       10.7343
-124.00000      124     124.0000
-125.20000      125     125.2000
-23232.23435    2       23232.2344
-PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT key, value, key * value FROM DECIMAL_6_3_txt ORDER BY key, value
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT key, value, key * value FROM DECIMAL_6_3_txt ORDER BY key, value
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: decimal_6_3_txt
-                  Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedColumnNums: [0, 1, 2]
-                      projectedColumns: [key:decimal(10,5), value:int, key_big:decimal(20,5)]
-                  Select Operator
-                    expressions: key (type: decimal(10,5)), value (type: int), (key * CAST( value AS decimal(10,0))) (type: decimal(21,5))
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [0, 1, 4]
-                        selectExpressions: DecimalColMultiplyDecimalColumn(col 0:decimal(10,5), col 3:decimal(10,0))(children: CastLongToDecimal(col 1:int) -> 3:decimal(10,0)) -> 4:decimal(21,5)
-                    Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: decimal(10,5)), _col1 (type: int)
-                      sort order: ++
-                      Reduce Sink Vectorization:
-                          className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [0, 1]
-                          native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          valueColumnNums: [4]
-                      Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col2 (type: decimal(21,5))
-            Execution mode: vectorized, llap
-            LLAP IO: no inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-                vectorizationSupportRemovedReasons: [DECIMAL_64 removed because LLAP is enabled]
-                vectorizationSupport: []
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: true
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 3
-                    includeColumns: [0, 1]
-                    dataColumns: key:decimal(10,5), value:int, key_big:decimal(20,5)
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: [decimal(10,0), decimal(21,5)]
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: aa
-                reduceColumnSortOrder: ++
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 3
-                    dataColumns: KEY.reducesinkkey0:decimal(10,5), KEY.reducesinkkey1:int, VALUE._col0:decimal(21,5)
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: decimal(10,5)), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: decimal(21,5))
-                outputColumnNames: _col0, _col1, _col2
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumnNums: [0, 1, 2]
-                Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT key, value, key * value FROM DECIMAL_6_3_txt ORDER BY key, value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@decimal_6_3_txt
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, key * value FROM DECIMAL_6_3_txt ORDER BY key, value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@decimal_6_3_txt
-#### A masked pattern was here ####
-NULL   -1234567890     NULL
-NULL   0       NULL
-NULL   3       NULL
-NULL   4       NULL
-NULL   1234567890      NULL
--4400.00000    4400    -19360000.00000
--1255.49000    -1255   1575639.95000
--1.12200       -11     12.34200
--1.12000       -1      1.12000
--0.33300       0       0.00000
--0.30000       0       0.00000
-0.00000        0       0.00000
-0.00000        0       0.00000
-0.33300        0       0.00000
-1.00000        1       1.00000
-1.00000        1       1.00000
-1.12000        1       1.12000
-1.12200        1       1.12200
-2.00000        2       4.00000
-3.14000        3       9.42000
-3.14000        3       9.42000
-3.14000        4       12.56000
-10.00000       10      100.00000
-10.73433       5       53.67165
-124.00000      124     15376.00000
-125.20000      125     15650.00000
-23232.23435    2       46464.46870

http://git-wip-us.apache.org/repos/asf/hive/blob/afa9ffee/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out b/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
deleted file mode 100644
index 8c0bf86..0000000
--- a/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
+++ /dev/null
@@ -1,344 +0,0 @@
-PREHOOK: query: create table dtest(a int, b int) clustered by (a) sorted by (a) into 1 buckets stored as orc
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dtest
-POSTHOOK: query: create table dtest(a int, b int) clustered by (a) sorted by (a) into 1 buckets stored as orc
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dtest
-PREHOOK: query: insert into table dtest select c,b from (select array(300,300,300,300,300) as a, 1 as b from src order by a limit 1) y lateral view  explode(a) t1 as c
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@dtest
-POSTHOOK: query: insert into table dtest select c,b from (select array(300,300,300,300,300) as a, 1 as b from src order by a limit 1) y lateral view  explode(a) t1 as c
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dtest
-POSTHOOK: Lineage: dtest.a SCRIPT []
-POSTHOOK: Lineage: dtest.b SIMPLE []
-PREHOOK: query: explain vectorization detail
-select sum(distinct a), count(distinct a) from dtest
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select sum(distinct a), count(distinct a) from dtest
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: dtest
-                  Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:a:int, 1:b:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    expressions: a (type: int)
-                    outputColumnNames: a
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [0]
-                    Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          groupByMode: FINAL
-                          keyExpressions: col 0:int
-                          native: false
-                          vectorProcessingMode: STREAMING
-                          projectedOutputColumnNums: []
-                      keys: a (type: int)
-                      mode: final
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: sum(_col0), count(_col0)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFSumLong(col 0:int) -> bigint, VectorUDAFCount(col 0:int) -> bigint
-                            className: VectorGroupByOperator
-                            groupByMode: HASH
-                            native: false
-                            vectorProcessingMode: HASH
-                            projectedOutputColumnNums: [0, 1]
-                        mode: hash
-                        outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          sort order: 
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkEmptyKeyOperator
-                              keyColumnNums: []
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                              valueColumnNums: [0, 1]
-                          Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col0 (type: bigint), _col1 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    includeColumns: [0]
-                    dataColumns: a:int, b:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: 
-                reduceColumnSortOrder: 
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 2
-                    dataColumns: VALUE._col0:bigint, VALUE._col1:bigint
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: sum(VALUE._col0), count(VALUE._col1)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0:bigint) -> bigint, VectorUDAFCountMerge(col 1:bigint) -> bigint
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    native: false
-                    vectorProcessingMode: GLOBAL
-                    projectedOutputColumnNums: [0, 1]
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select sum(distinct a), count(distinct a) from dtest
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dtest
-#### A masked pattern was here ####
-POSTHOOK: query: select sum(distinct a), count(distinct a) from dtest
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dtest
-#### A masked pattern was here ####
-300    1
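
One detail worth noting in the plan above before the next case: the map-side Group By runs in FINAL mode (groupByMode: FINAL, vectorProcessingMode: STREAMING), so no reduce-side merge of the dedupe stage is needed; dtest is clustered and sorted by a into a single bucket, so each split already arrives grouped. A sketch of the session settings in play, taken from the conditions the plan reports as met; the last flag is an assumption and is not named in the output.

    -- Flags listed under enabledConditionsMet / nativeConditionsMet above.
    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.reduce.enabled=true;
    SET hive.vectorized.execution.reducesink.new.enabled=true;
    -- Assumption: map-side FINAL grouping over sorted, bucketed input is
    -- governed by this flag; it does not appear in the plan itself.
    SET hive.map.groupby.sorted=true;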
-PREHOOK: query: explain vectorization detail
-select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc
-PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
-select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc
-POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: alltypesorc
-                  Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                  TableScan Vectorization:
-                      native: true
-                      vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
-                  Select Operator
-                    expressions: cint (type: int)
-                    outputColumnNames: cint
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumnNums: [2]
-                    Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          groupByMode: HASH
-                          keyExpressions: col 2:int
-                          native: false
-                          vectorProcessingMode: HASH
-                          projectedOutputColumnNums: []
-                      keys: cint (type: int)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 6030 Data size: 18008 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkLongOperator
-                            keyColumnNums: [0]
-                            native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            valueColumnNums: []
-                        Statistics: Num rows: 6030 Data size: 18008 Basic stats: COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized, llap
-            LLAP IO: all inputs
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [2]
-                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: a
-                reduceColumnSortOrder: +
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 1
-                    dataColumns: KEY._col0:int
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    keyExpressions: col 0:int
-                    native: false
-                    vectorProcessingMode: MERGE_PARTIAL
-                    projectedOutputColumnNums: []
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 6030 Data size: 18008 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: sum(_col0), count(_col0), avg(_col0), std(_col0)
-                  Group By Vectorization:
-                      aggregators: VectorUDAFSumLong(col 0:int) -> bigint, VectorUDAFCount(col 0:int) -> bigint, VectorUDAFAvgLong(col 0:int) -> struct<count:bigint,sum:double,input:int>, VectorUDAFVarLong(col 0:int) -> struct<count:bigint,sum:double,variance:double> aggregation: std
-                      className: VectorGroupByOperator
-                      groupByMode: HASH
-                      native: false
-                      vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0, 1, 2, 3]
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    sort order: 
-                    Reduce Sink Vectorization:
-                        className: VectorReduceSinkEmptyKeyOperator
-                        keyColumnNums: []
-                        native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        valueColumnNums: [0, 1, 2, 3]
-                    Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: struct<count:bigint,sum:double,input:int>), _col3 (type: struct<count:bigint,sum:double,variance:double>)
-        Reducer 3 
-            Execution mode: vectorized, llap
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
-                reduceColumnNullOrder: 
-                reduceColumnSortOrder: 
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 4
-                    dataColumns: VALUE._col0:bigint, VALUE._col1:bigint, VALUE._col2:struct<count:bigint,sum:double,input:int>, VALUE._col3:struct<count:bigint,sum:double,variance:double>
-                    partitionColumnCount: 0
-                    scratchColumnTypeNames: []
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: sum(VALUE._col0), count(VALUE._col1), avg(VALUE._col2), std(VALUE._col3)
-                Group By Vectorization:
-                    aggregators: VectorUDAFSumLong(col 0:bigint) -> bigint, VectorUDAFCountMerge(col 1:bigint) -> bigint, VectorUDAFAvgFinal(col 2:struct<count:bigint,sum:double,input:int>) -> double, VectorUDAFVarFinal(col 3:struct<count:bigint,sum:double,variance:double>) -> double aggregation: std
-                    className: VectorGroupByOperator
-                    groupByMode: MERGEPARTIAL
-                    native: false
-                    vectorProcessingMode: GLOBAL
-                    projectedOutputColumnNums: [0, 1, 2, 3]
-                mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  File Sink Vectorization:
-                      className: VectorFileSinkOperator
-                      native: false
-                  Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
--3482841611    6082    -572647.4204209142      1.1894598769807303E9
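
Taken together, the removed vectorized_distinct_gby plans document Hive's two-phase strategy for distinct aggregates: deduplicate on the distinct key first, then aggregate the deduplicated rows. A sketch of the hand-written form the last query is equivalent to; the derived-table alias t is illustrative, not taken from the golden file.

    -- GROUP BY cint deduplicates (Reducer 2 above); the outer aggregates
    -- then run once, globally (Reducer 3). sum/count/avg/std all skip the
    -- NULL group that GROUP BY cint produces, matching DISTINCT semantics.
    SELECT sum(c), count(c), avg(c), std(c)
    FROM (SELECT cint AS c FROM alltypesorc GROUP BY cint) t;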
