Modified: hive/trunk/ql/src/test/results/clientpositive/vectorized_ptf.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/vectorized_ptf.q.out?rev=1654439&r1=1654438&r2=1654439&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/vectorized_ptf.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/vectorized_ptf.q.out Sat Jan 24 00:16:36 2015
@@ -239,7 +239,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_name (type: string), p_mfgr (type: string), 
p_size (type: int), p_retailprice (type: double)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -290,33 +290,30 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: string), _col2 (type: string), _col5 
(type: int), _col7 (type: double)
-              outputColumnNames: _col1, _col2, _col5, _col7
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                table:
-                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col1,_col2,_col5,_col7
-                      columns.types string,string,int,double
-                      escape.delim \
-                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col1,_col2,_col5,_col7
+                    columns.types string,string,int,double
+                    escape.delim \
+                    serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -572,7 +569,7 @@ STAGE PLANS:
                 Map-reduce partition columns: p_partkey (type: int)
                 Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE 
Column stats: NONE
                 tag: 0
-                value expressions: p_name (type: string), p_mfgr (type: 
string), p_brand (type: string), p_type (type: string), p_size (type: int), 
p_container (type: string), p_retailprice (type: double), p_comment (type: 
string)
+                value expressions: p_name (type: string), p_mfgr (type: 
string), p_size (type: int)
                 auto parallelism: false
           TableScan
             alias: p2
@@ -647,7 +644,7 @@ STAGE PLANS:
           keys:
             0 p_partkey (type: int)
             1 p_partkey (type: int)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, 
_col7, _col8
+          outputColumnNames: _col1, _col2, _col5
           Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE 
Column stats: NONE
           File Output Operator
             compressed: false
@@ -658,8 +655,8 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 properties:
-                  columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8
-                  columns.types 
int,string,string,string,string,int,string,double,string
+                  columns _col1,_col2,_col5
+                  columns.types string,string,int
                   escape.delim \
                   serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -678,7 +675,7 @@ STAGE PLANS:
               Map-reduce partition columns: _col2 (type: string)
               Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: 
int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+              value expressions: _col1 (type: string), _col2 (type: string), 
_col5 (type: int)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -689,8 +686,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8
-              columns.types 
int,string,string,string,string,int,string,double,string
+              columns _col1,_col2,_col5
+              columns.types string,string,int
               escape.delim \
               serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -698,8 +695,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8
-                columns.types 
int,string,string,string,string,int,string,double,string
+                columns _col1,_col2,_col5
+                columns.types string,string,int
                 escape.delim \
                 serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -711,27 +708,23 @@ STAGE PLANS:
           Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: string), _col2 (type: string), _col5 
(type: int)
-              outputColumnNames: _col1, _col2, _col5
-              Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                table:
-                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col1,_col2,_col5
-                      columns.types string,string,int
-                      escape.delim \
-                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col1,_col2,_col5
+                    columns.types string,string,int
+                    escape.delim \
+                    serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-3
     Map Reduce
@@ -922,7 +915,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_name (type: string), p_mfgr (type: string), 
p_size (type: int)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -973,6 +966,7 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
@@ -1173,7 +1167,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_name (type: string), p_mfgr (type: string), 
p_size (type: int), p_retailprice (type: double)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -1224,33 +1218,30 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: string), _col2 (type: string), _col5 
(type: int), _col7 (type: double)
-              outputColumnNames: _col1, _col2, _col5, _col7
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                table:
-                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col1,_col2,_col5,_col7
-                      columns.types string,string,int,double
-                      escape.delim \
-                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col1,_col2,_col5,_col7
+                    columns.types string,string,int,double
+                    escape.delim \
+                    serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -1504,7 +1495,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_name (type: string), p_mfgr (type: string), 
p_size (type: int)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -1555,33 +1546,30 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: string), _col2 (type: string), _col5 
(type: int)
-              outputColumnNames: _col1, _col2, _col5
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                table:
-                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col1,_col2,_col5
-                      columns.types string,string,int
-                      escape.delim \
-                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col1,_col2,_col5
+                    columns.types string,string,int
+                    escape.delim \
+                    serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -1845,7 +1833,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_name (type: string), p_mfgr (type: string), 
p_size (type: int)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -1896,6 +1884,7 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
@@ -2198,7 +2187,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -2249,37 +2238,34 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: int), _col1 (type: string), _col2 
(type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), 
_col6 (type: string), _col7 (type: double), _col8 (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              Filter Operator
-                isSamplingPred: false
-                predicate: _col0 is not null (type: boolean)
-                Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE 
Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
+            Filter Operator
+              isSamplingPred: false
+              predicate: _col0 is not null (type: boolean)
+              Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE 
Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
 #### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  table:
-                      input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      properties:
-                        columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8
-                        columns.types 
int,string,string,string,string,int,string,double,string
-                        escape.delim \
-                        serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                      serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
+                NumFilesPerFileSink: 1
+                table:
+                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    properties:
+                      columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8
+                      columns.types 
int,string,string,string,string,int,string,double,string
+                      escape.delim \
+                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                TotalFiles: 1
+                GatherStats: false
+                MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -2545,7 +2531,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -2596,37 +2582,34 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: int), _col1 (type: string), _col2 
(type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), 
_col6 (type: string), _col7 (type: double), _col8 (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              Filter Operator
-                isSamplingPred: false
-                predicate: _col0 is not null (type: boolean)
-                Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE 
Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
+            Filter Operator
+              isSamplingPred: false
+              predicate: _col0 is not null (type: boolean)
+              Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE 
Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
 #### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  table:
-                      input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      properties:
-                        columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8
-                        columns.types 
int,string,string,string,string,int,string,double,string
-                        escape.delim \
-                        serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                      serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
+                NumFilesPerFileSink: 1
+                table:
+                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    properties:
+                      columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8
+                      columns.types 
int,string,string,string,string,int,string,double,string
+                      escape.delim \
+                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                TotalFiles: 1
+                GatherStats: false
+                MultiFileSpray: false
 
   Stage: Stage-1
     Map Reduce
@@ -2903,7 +2886,7 @@ STAGE PLANS:
                 Map-reduce partition columns: p_mfgr (type: string)
                 Statistics: Num rows: 26 Data size: 16042 Basic stats: 
COMPLETE Column stats: NONE
                 tag: -1
-                value expressions: p_partkey (type: int), p_name (type: 
string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), 
p_size (type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+                value expressions: p_name (type: string), p_mfgr (type: 
string), p_size (type: int)
                 auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -2960,27 +2943,23 @@ STAGE PLANS:
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: string), _col2 (type: string), _col5 
(type: int)
-              outputColumnNames: _col1, _col2, _col5
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                table:
-                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col1,_col2,_col5
-                      columns.types string,string,int
-                      escape.delim \
-                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col1,_col2,_col5
+                    columns.types string,string,int
+                    escape.delim \
+                    serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -3222,7 +3201,7 @@ STAGE PLANS:
                 Map-reduce partition columns: p_mfgr (type: string)
                 Statistics: Num rows: 26 Data size: 16042 Basic stats: 
COMPLETE Column stats: NONE
                 tag: -1
-                value expressions: p_partkey (type: int), p_name (type: 
string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), 
p_size (type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+                value expressions: p_name (type: string), p_mfgr (type: 
string), p_size (type: int), p_retailprice (type: double)
                 auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -3279,27 +3258,23 @@ STAGE PLANS:
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: string), _col2 (type: string), _col5 
(type: int), _col7 (type: double)
-              outputColumnNames: _col1, _col2, _col5, _col7
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                table:
-                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col1,_col2,_col5,_col7
-                      columns.types string,string,int,double
-                      escape.delim \
-                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col1,_col2,_col5,_col7
+                    columns.types string,string,int,double
+                    escape.delim \
+                    serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -3544,7 +3519,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_name (type: string), p_mfgr (type: string), 
p_size (type: int), p_retailprice (type: double)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -3595,33 +3570,30 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: string), _col2 (type: string), _col5 
(type: int), _col7 (type: double)
-              outputColumnNames: _col1, _col2, _col5, _col7
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                table:
-                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col1,_col2,_col5,_col7
-                      columns.types string,string,int,double
-                      escape.delim \
-                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col1,_col2,_col5,_col7
+                    columns.types string,string,int,double
+                    escape.delim \
+                    serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -3876,7 +3848,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_name (type: string), p_mfgr (type: string), 
p_size (type: int), p_retailprice (type: double)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -3927,6 +3899,7 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
@@ -3944,8 +3917,8 @@ STAGE PLANS:
                     input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                     properties:
-                      columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-                      columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+                      columns _col1,_col2,_col5,_col7
+                      columns.types string,string,int,double
                       escape.delim \
                       serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
                     serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -3964,7 +3937,7 @@ STAGE PLANS:
               Map-reduce partition columns: _col2 (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: 
int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 
(type: bigint), _col10 (type: string), _col11 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: _col1 (type: string), _col2 (type: string), 
_col5 (type: int), _col7 (type: double)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -3975,8 +3948,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-              columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+              columns _col1,_col2,_col5,_col7
+              columns.types string,string,int,double
               escape.delim \
               serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -3984,8 +3957,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-                columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+                columns _col1,_col2,_col5,_col7
+                columns.types string,string,int,double
                 escape.delim \
                 serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -3997,27 +3970,23 @@ STAGE PLANS:
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: string), _col2 (type: string), _col5 
(type: int), _col7 (type: double)
-              outputColumnNames: _col1, _col2, _col5, _col7
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                table:
-                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col1,_col2,_col5,_col7
-                      columns.types string,string,int,double
-                      escape.delim \
-                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col1,_col2,_col5,_col7
+                    columns.types string,string,int,double
+                    escape.delim \
+                    serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-3
     Map Reduce
@@ -4292,7 +4261,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_name (type: string), p_mfgr (type: string), 
p_size (type: int), p_retailprice (type: double)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -4343,33 +4312,30 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [sub1:part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: string), _col2 (type: string), _col5 
(type: int), _col7 (type: double)
-              outputColumnNames: _col1, _col2, _col5, _col7
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                table:
-                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col1,_col2,_col5,_col7
-                      columns.types string,string,int,double
-                      escape.delim \
-                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col1,_col2,_col5,_col7
+                    columns.types string,string,int,double
+                    escape.delim \
+                    serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -4721,7 +4687,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_size (type: int), p_retailprice (type: double)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -4772,37 +4738,34 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: int), _col1 (type: string), _col2 
(type: string), _col5 (type: int), _col7 (type: double)
-              outputColumnNames: _col0, _col1, _col2, _col5, _col7
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              Filter Operator
-                isSamplingPred: false
-                predicate: _col0 is not null (type: boolean)
-                Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE 
Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
+            Filter Operator
+              isSamplingPred: false
+              predicate: _col0 is not null (type: boolean)
+              Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE 
Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
 #### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  table:
-                      input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      properties:
-                        columns _col0,_col1,_col2,_col5,_col7
-                        columns.types int,string,string,int,double
-                        escape.delim \
-                        serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                      serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
+                NumFilesPerFileSink: 1
+                table:
+                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    properties:
+                      columns _col0,_col1,_col2,_col5,_col7
+                      columns.types int,string,string,int,double
+                      escape.delim \
+                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                TotalFiles: 1
+                GatherStats: false
+                MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -5130,7 +5093,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_name (type: string), p_mfgr (type: string), 
p_size (type: int)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -5181,6 +5144,7 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
@@ -5980,12 +5944,12 @@ TOK_QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-3 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-3
-  Stage-1 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-1
-  Stage-6 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-6
-  Stage-7 depends on stages: Stage-0
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-5 depends on stages: Stage-2
+  Stage-6 depends on stages: Stage-5
+  Stage-1 depends on stages: Stage-6
+  Stage-7 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-2
@@ -6001,7 +5965,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_name (type: string), p_mfgr (type: string), 
p_size (type: int), p_retailprice (type: double)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -6052,6 +6016,7 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
@@ -6067,82 +6032,14 @@ STAGE PLANS:
                   input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   properties:
-                    columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-                    columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+                    columns _col1,_col2,_col5,_col7
+                    columns.types string,string,int,double
                     escape.delim \
                     serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
                   serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-            Select Operator
-              expressions: _col1 (type: string), _col2 (type: string), _col5 
(type: int), _col7 (type: double)
-              outputColumnNames: _col1, _col2, _col5, _col7
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                table:
-                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col1,_col2,_col5,_col7
-                      columns.types string,string,int,double
-                      escape.delim \
-                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            GatherStats: false
-            Reduce Output Operator
-              key expressions: _col2 (type: string), _col5 (type: int)
-              sort order: ++
-              Map-reduce partition columns: _col2 (type: string)
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              tag: -1
-              value expressions: _col1 (type: string)
-              auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: -mr-10002
-            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-            output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-            properties:
-              columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-              columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
-              escape.delim \
-              serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          
-              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-              output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-              properties:
-                columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-                columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
-                escape.delim \
-                serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: 
string), KEY.reducesinkkey1 (type: int)
-          outputColumnNames: _col1, _col2, _col5
-          Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-          PTF Operator
-            Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
@@ -6152,8 +6049,8 @@ STAGE PLANS:
                   input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   properties:
-                    columns _wcol0,_col1,_col2,_col5
-                    columns.types bigint,string,string,int
+                    columns _col1,_col2,_col5,_col7
+                    columns.types string,string,int,double
                     escape.delim \
                     serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
                   serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -6161,30 +6058,30 @@ STAGE PLANS:
               GatherStats: false
               MultiFileSpray: false
 
-  Stage: Stage-4
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
             GatherStats: false
             Reduce Output Operator
-              key expressions: _col2 (type: string), _col2 (type: string), 
_col1 (type: string)
-              sort order: +++
+              key expressions: _col2 (type: string), _col1 (type: string)
+              sort order: ++
               Map-reduce partition columns: _col2 (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: _wcol0 (type: bigint), _col5 (type: int)
+              value expressions: _col5 (type: int), _col7 (type: double)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: -mr-10003
+            base file name: -mr-10002
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns _wcol0,_col1,_col2,_col5
-              columns.types bigint,string,string,int
+              columns _col1,_col2,_col5,_col7
+              columns.types string,string,int,double
               escape.delim \
               serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -6192,8 +6089,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns _wcol0,_col1,_col2,_col5
-                columns.types bigint,string,string,int
+                columns _col1,_col2,_col5,_col7
+                columns.types string,string,int,double
                 escape.delim \
                 serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -6202,18 +6099,18 @@ STAGE PLANS:
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
-          expressions: VALUE._col0 (type: bigint), KEY.reducesinkkey2 (type: 
string), KEY.reducesinkkey0 (type: string), VALUE._col4 (type: int)
-          outputColumnNames: _col0, _col2, _col3, _col6
+          expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 
(type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+          outputColumnNames: _col1, _col2, _col5, _col7
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
             Select Operator
-              expressions: _col3 (type: string), _col2 (type: string), _col6 
(type: int), UDFToInteger(round(_col0, 1)) (type: int), _wcol1 (type: int), 
_wcol2 (type: int), _wcol3 (type: double), _wcol4 (type: int)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7
+              expressions: _col2 (type: string), _col1 (type: string), _col5 
(type: int), _wcol0 (type: int), _wcol1 (type: int), _wcol2 (type: double)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               File Output Operator
                 compressed: false
-                GlobalTableId: 2
+                GlobalTableId: 1
 #### A masked pattern was here ####
                 NumFilesPerFileSink: 1
                 Statistics: Num rows: 26 Data size: 16042 Basic stats: 
COMPLETE Column stats: NONE
@@ -6223,22 +6120,22 @@ STAGE PLANS:
                     output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
                       bucket_count -1
-                      columns p_mfgr,p_name,p_size,s2,r,dr,cud,fv1
+                      columns p_mfgr,p_name,p_size,r,dr,s
                       columns.comments 
-                      columns.types string:string:int:int:int:int:double:int
+                      columns.types string:string:int:int:int:double
 #### A masked pattern was here ####
-                      name default.part_5
-                      serialization.ddl struct part_5 { string p_mfgr, string 
p_name, i32 p_size, i32 s2, i32 r, i32 dr, double cud, i32 fv1}
+                      name default.part_4
+                      serialization.ddl struct part_4 { string p_mfgr, string 
p_name, i32 p_size, i32 r, i32 dr, double s}
                       serialization.format 1
                       serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.part_5
+                    name: default.part_4
                 TotalFiles: 1
                 GatherStats: true
                 MultiFileSpray: false
 
-  Stage: Stage-1
+  Stage: Stage-0
     Move Operator
       tables:
           replace: true
@@ -6248,41 +6145,41 @@ STAGE PLANS:
               output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
                 bucket_count -1
-                columns p_mfgr,p_name,p_size,s2,r,dr,cud,fv1
+                columns p_mfgr,p_name,p_size,r,dr,s
                 columns.comments 
-                columns.types string:string:int:int:int:int:double:int
+                columns.types string:string:int:int:int:double
 #### A masked pattern was here ####
-                name default.part_5
-                serialization.ddl struct part_5 { string p_mfgr, string 
p_name, i32 p_size, i32 s2, i32 r, i32 dr, double cud, i32 fv1}
+                name default.part_4
+                serialization.ddl struct part_4 { string p_mfgr, string 
p_name, i32 p_size, i32 r, i32 dr, double s}
                 serialization.format 1
                 serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.part_5
+              name: default.part_4
 
-  Stage: Stage-5
+  Stage: Stage-4
     Stats-Aggr Operator
 #### A masked pattern was here ####
 
-  Stage: Stage-6
+  Stage: Stage-5
     Map Reduce
       Map Operator Tree:
           TableScan
             GatherStats: false
             Reduce Output Operator
-              key expressions: _col2 (type: string), _col1 (type: string)
+              key expressions: _col2 (type: string), _col5 (type: int)
               sort order: ++
               Map-reduce partition columns: _col2 (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: _col5 (type: int), _col7 (type: double)
+              value expressions: _col1 (type: string)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: -mr-10004
+            base file name: -mr-10003
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
@@ -6305,18 +6202,82 @@ STAGE PLANS:
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 
(type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
-          outputColumnNames: _col1, _col2, _col5, _col7
+          expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: 
string), KEY.reducesinkkey1 (type: int)
+          outputColumnNames: _col1, _col2, _col5
+          Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
+          PTF Operator
+            Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _wcol0,_col1,_col2,_col5
+                    columns.types bigint,string,string,int
+                    escape.delim \
+                    serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
+            Reduce Output Operator
+              key expressions: _col2 (type: string), _col2 (type: string), 
_col1 (type: string)
+              sort order: +++
+              Map-reduce partition columns: _col2 (type: string)
+              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
+              tag: -1
+              value expressions: _wcol0 (type: bigint), _col5 (type: int)
+              auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -mr-10004
+            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+            output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+            properties:
+              columns _wcol0,_col1,_col2,_col5
+              columns.types bigint,string,string,int
+              escape.delim \
+              serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          
+              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+              output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+              properties:
+                columns _wcol0,_col1,_col2,_col5
+                columns.types bigint,string,string,int
+                escape.delim \
+                serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: bigint), KEY.reducesinkkey2 (type: 
string), KEY.reducesinkkey0 (type: string), VALUE._col4 (type: int)
+          outputColumnNames: _col0, _col2, _col3, _col6
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
             Select Operator
-              expressions: _col2 (type: string), _col1 (type: string), _col5 
(type: int), _wcol0 (type: int), _wcol1 (type: int), _wcol2 (type: double)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+              expressions: _col3 (type: string), _col2 (type: string), _col6 
(type: int), UDFToInteger(round(_col0, 1)) (type: int), _wcol1 (type: int), 
_wcol2 (type: int), _wcol3 (type: double), _wcol4 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               File Output Operator
                 compressed: false
-                GlobalTableId: 1
+                GlobalTableId: 2
 #### A masked pattern was here ####
                 NumFilesPerFileSink: 1
                 Statistics: Num rows: 26 Data size: 16042 Basic stats: 
COMPLETE Column stats: NONE
@@ -6326,22 +6287,22 @@ STAGE PLANS:
                     output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
                       bucket_count -1
-                      columns p_mfgr,p_name,p_size,r,dr,s
+                      columns p_mfgr,p_name,p_size,s2,r,dr,cud,fv1
                       columns.comments 
-                      columns.types string:string:int:int:int:double
+                      columns.types string:string:int:int:int:int:double:int
 #### A masked pattern was here ####
-                      name default.part_4
-                      serialization.ddl struct part_4 { string p_mfgr, string 
p_name, i32 p_size, i32 r, i32 dr, double s}
+                      name default.part_5
+                      serialization.ddl struct part_5 { string p_mfgr, string 
p_name, i32 p_size, i32 s2, i32 r, i32 dr, double cud, i32 fv1}
                       serialization.format 1
                       serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.part_4
+                    name: default.part_5
                 TotalFiles: 1
                 GatherStats: true
                 MultiFileSpray: false
 
-  Stage: Stage-0
+  Stage: Stage-1
     Move Operator
       tables:
           replace: true
@@ -6351,17 +6312,17 @@ STAGE PLANS:
               output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
                 bucket_count -1
-                columns p_mfgr,p_name,p_size,r,dr,s
+                columns p_mfgr,p_name,p_size,s2,r,dr,cud,fv1
                 columns.comments 
-                columns.types string:string:int:int:int:double
+                columns.types string:string:int:int:int:int:double:int
 #### A masked pattern was here ####
-                name default.part_4
-                serialization.ddl struct part_4 { string p_mfgr, string 
p_name, i32 p_size, i32 r, i32 dr, double s}
+                name default.part_5
+                serialization.ddl struct part_5 { string p_mfgr, string 
p_name, i32 p_size, i32 s2, i32 r, i32 dr, double cud, i32 fv1}
                 serialization.format 1
                 serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.part_4
+              name: default.part_5
 
   Stage: Stage-7
     Stats-Aggr Operator
@@ -6647,7 +6608,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_name (type: string), p_mfgr (type: string), 
p_size (type: int)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -6698,6 +6659,7 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
@@ -6715,8 +6677,8 @@ STAGE PLANS:
                     input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                     properties:
-                      columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-                      columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+                      columns _col1,_col2,_col5
+                      columns.types string,string,int
                       escape.delim \
                       serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
                     serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -6735,7 +6697,7 @@ STAGE PLANS:
               Map-reduce partition columns: _col2 (type: string), _col1 (type: 
string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: 
int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 
(type: bigint), _col10 (type: string), _col11 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: _col1 (type: string), _col2 (type: string), 
_col5 (type: int)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -6746,8 +6708,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-              columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+              columns _col1,_col2,_col5
+              columns.types string,string,int
               escape.delim \
               serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -6755,8 +6717,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-                columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+                columns _col1,_col2,_col5
+                columns.types string,string,int
                 escape.delim \
                 serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -6768,27 +6730,23 @@ STAGE PLANS:
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: string), _col2 (type: string), _col5 
(type: int)
-              outputColumnNames: _col1, _col2, _col5
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                table:
-                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col1,_col2,_col5
-                      columns.types string,string,int
-                      escape.delim \
-                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col1,_col2,_col5
+                    columns.types string,string,int
+                    escape.delim \
+                    serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-3
     Map Reduce
@@ -7094,7 +7052,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: p_partkey (type: int), p_name (type: string), 
p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), 
p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), 
INPUT__FILE__NAME (type: string), ROW__ID (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: p_name (type: string), p_mfgr (type: string), 
p_size (type: int)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -7145,6 +7103,7 @@ STAGE PLANS:
             name: default.part_orc
       Truncated Path -> Alias:
         /part_orc [part_orc]
+      Execution mode: vectorized
       Needs Tagging: false
       Reduce Operator Tree:
         Extract
@@ -7160,8 +7119,8 @@ STAGE PLANS:
                   input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   properties:
-                    columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-                    columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+                    columns _col1,_col2,_col5
+                    columns.types string,string,int
                     escape.delim \
                     serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
                   serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -7180,7 +7139,7 @@ STAGE PLANS:
               Map-reduce partition columns: _col2 (type: string), _col1 (type: 
string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: 
int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 
(type: bigint), _col10 (type: string), _col11 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: _col1 (type: string), _col2 (type: string), 
_col5 (type: int)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -7191,8 +7150,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-              columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+              columns _col1,_col2,_col5
+              columns.types string,string,int
               escape.delim \
               serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -7200,8 +7159,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-                columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+                columns _col1,_col2,_col5
+                columns.types string,string,int
                 escape.delim \
                 serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -7222,8 +7181,8 @@ STAGE PLANS:
                   input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   properties:
-                    columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-                    columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+                    columns _col1,_col2,_col5
+                    columns.types string,string,int
                     escape.delim \
                     serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
                   serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -7242,7 +7201,7 @@ STAGE PLANS:
               Map-reduce partition columns: _col2 (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1
-              value expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: 
int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 
(type: bigint), _col10 (type: string), _col11 (type: 
struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+              value expressions: _col1 (type: string), _col2 (type: string), 
_col5 (type: int)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -7253,8 +7212,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-              columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+              columns _col1,_col2,_col5
+              columns.types string,string,int
               escape.delim \
               serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -7262,8 +7221,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11
-                columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct<transactionid:bigint,bucketid:int,rowid:bigint>
+                columns _col1,_col2,_col5
+                columns.types string,string,int
                 escape.delim \
                 serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -7275,27 +7234,23 @@ STAGE PLANS:
           Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
           PTF Operator
             Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: string), _col2 (type: string), _col5 
(type: int)
-              outputColumnNames: _col1, _col2, _col5
-              Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                table:
-                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col1,_col2,_col5
-                      columns.types string,string,int
-                      escape.delim \
-                      serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col1,_col2,_col5
+                    columns.types string,string,int
+                    escape.delim \
+                    serialization.lib 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-4
     Map Reduce
@@ -7584,7 +7539,7 @@ STAGE PLANS:
               Map-reduce partition columns: p_mfgr (type: string), p_name 
(type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE 
Column stats: NONE
               tag: -1

[... 451 lines stripped ...]
