http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/stats10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats10.q.out b/ql/src/test/results/clientpositive/spark/stats10.q.out
index bcd8ecd..d79fc10 100644
--- a/ql/src/test/results/clientpositive/spark/stats10.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats10.q.out
@@ -381,7 +381,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: bucket3_1
-                  Statistics: Num rows: 1000 Data size: 106240 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-1
     Stats Work

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/stats12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats12.q.out b/ql/src/test/results/clientpositive/spark/stats12.q.out
index 78e640d..4aa3704 100644
--- a/ql/src/test/results/clientpositive/spark/stats12.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats12.q.out
@@ -52,7 +52,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: analyze_srcpart
-                  Statistics: Num rows: 1 Data size: 232480 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 392 Data size: 232480 Basic stats: COMPLETE Column stats: NONE
                   Statistics Aggregation Key Prefix: default.analyze_srcpart/
                   GatherStats: true
             Path -> Alias:

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/stats13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats13.q.out b/ql/src/test/results/clientpositive/spark/stats13.q.out
index d5b55ed..d59ca8b 100644
--- a/ql/src/test/results/clientpositive/spark/stats13.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats13.q.out
@@ -52,7 +52,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: analyze_srcpart
-                  Statistics: Num rows: 1 Data size: 232480 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 392 Data size: 232480 Basic stats: COMPLETE Column stats: NONE
                   Statistics Aggregation Key Prefix: default.analyze_srcpart/
                   GatherStats: true
             Path -> Alias:

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/stats2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats2.q.out b/ql/src/test/results/clientpositive/spark/stats2.q.out
index c22ac3f..e8330db 100644
--- a/ql/src/test/results/clientpositive/spark/stats2.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats2.q.out
@@ -135,7 +135,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: analyze_t1
-                  Statistics: Num rows: 1 Data size: 232480 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 392 Data size: 232480 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-1
     Stats Work

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/stats7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats7.q.out b/ql/src/test/results/clientpositive/spark/stats7.q.out
index f9c26f9..5d4a4a1 100644
--- a/ql/src/test/results/clientpositive/spark/stats7.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats7.q.out
@@ -50,7 +50,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: analyze_srcpart
-                  Statistics: Num rows: 1 Data size: 232480 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 392 Data size: 232480 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-1
     Stats Work

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/stats8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats8.q.out b/ql/src/test/results/clientpositive/spark/stats8.q.out
index 6aa7dc4..544d170 100644
--- a/ql/src/test/results/clientpositive/spark/stats8.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats8.q.out
@@ -50,7 +50,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: analyze_srcpart
-                  Statistics: Num rows: 1 Data size: 232480 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 392 Data size: 232480 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-1
     Stats Work
@@ -162,7 +162,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: analyze_srcpart
-                  Statistics: Num rows: 500 Data size: 53120 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
     Stats Work
@@ -235,7 +235,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: analyze_srcpart
-                  Statistics: Num rows: 1000 Data size: 106240 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
     Stats Work
@@ -308,7 +308,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: analyze_srcpart
-                  Statistics: Num rows: 1500 Data size: 159360 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 1500 Data size: 15936 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
     Stats Work
@@ -381,7 +381,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: analyze_srcpart
-                  Statistics: Num rows: 2000 Data size: 212480 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-1
     Stats Work

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/stats9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats9.q.out b/ql/src/test/results/clientpositive/spark/stats9.q.out
index 1d48a2b..fb2f90a 100644
--- a/ql/src/test/results/clientpositive/spark/stats9.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats9.q.out
@@ -33,7 +33,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: analyze_srcbucket
-                  Statistics: Num rows: 1 Data size: 116030 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 391 Data size: 116030 Basic stats: COMPLETE Column stats: COMPLETE
 
   Stage: Stage-1
     Stats Work

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out b/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out
index b38ec9a..1f5bd0c 100644
--- a/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out
@@ -308,9 +308,9 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: anaylyze_external
-                  Statistics: Num rows: 500 Data size: 53120 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 500 Data size: 53120 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
index fb82806..a619efd 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
@@ -56,22 +56,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -80,10 +80,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -96,10 +96,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
index a2a5766..9bdcaf6 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
@@ -68,14 +68,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), 1 (type: bigint)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 7 Data size: 700 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -85,35 +85,35 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), 2 (type: bigint)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 7 Data size: 700 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -126,10 +126,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 7 Data size: 700 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_11.q.out b/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
index f050ca1..c4cf2bb 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
@@ -66,18 +66,18 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), 1 (type: int)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 9 Data size: 900 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 9 Data size: 900 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -87,18 +87,18 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), 2 (type: int)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 9 Data size: 900 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 9 Data size: 900 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -108,18 +108,18 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), 3 (type: int)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 9 Data size: 900 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 9 Data size: 900 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_12.q.out b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out
index 3cec9d4..cf89dab 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_12.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out
@@ -84,14 +84,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), 1 (type: bigint)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 6 Data size: 630 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 630 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -101,14 +101,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: a
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -118,14 +118,14 @@ STAGE PLANS:
                         outputColumnNames: _col0, _col2
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 3 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
                         Select Operator
                           expressions: _col0 (type: string), UDFToLong(_col2) (type: bigint)
                           outputColumnNames: _col0, _col1
-                          Statistics: Num rows: 3 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            Statistics: Num rows: 6 Data size: 630 Basic stats: COMPLETE Column stats: NONE
+                            Statistics: Num rows: 2 Data size: 630 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                                 output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_13.q.out b/ql/src/test/results/clientpositive/spark/union_remove_13.q.out
index 22a4015..0d21cba 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_13.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_13.q.out
@@ -86,35 +86,35 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: a
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -124,14 +124,14 @@ STAGE PLANS:
                         outputColumnNames: _col0, _col2
                         input vertices:
                           1 Map 4
-                        Statistics: Num rows: 3 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
                         Select Operator
                           expressions: _col0 (type: string), UDFToLong(_col2) (type: bigint)
                           outputColumnNames: _col0, _col1
-                          Statistics: Num rows: 3 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            Statistics: Num rows: 4 Data size: 430 Basic stats: COMPLETE Column stats: NONE
+                            Statistics: Num rows: 2 Data size: 630 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                                 output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -146,10 +146,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 4 Data size: 430 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 630 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_14.q.out b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out
index 3cec9d4..cf89dab 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_14.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out
@@ -84,14 +84,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), 1 (type: bigint)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 6 Data size: 630 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 630 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -101,14 +101,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: a
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -118,14 +118,14 @@ STAGE PLANS:
                         outputColumnNames: _col0, _col2
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 3 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
                         Select Operator
                           expressions: _col0 (type: string), UDFToLong(_col2) (type: bigint)
                           outputColumnNames: _col0, _col1
-                          Statistics: Num rows: 3 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
                           File Output Operator
                             compressed: false
-                            Statistics: Num rows: 6 Data size: 630 Basic stats: COMPLETE Column stats: NONE
+                            Statistics: Num rows: 2 Data size: 630 Basic stats: COMPLETE Column stats: NONE
                             table:
                                 input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                                 output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
index bc21af5..58b9688 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
@@ -56,22 +56,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -80,14 +80,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), '1' (type: string)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -100,14 +100,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), '2' (type: string)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
index 07a4544..94ee97e 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
@@ -61,22 +61,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -85,14 +85,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), '1' (type: string)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -105,14 +105,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), '2' (type: string)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
index ffa735d..7715683 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
@@ -53,18 +53,18 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), 1 (type: int), '1' (type: string)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), _col2 (type: string)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -74,18 +74,18 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), 2 (type: int), '2' (type: string)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), _col2 (type: string)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
index f021459..896e2d8 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
@@ -56,22 +56,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -80,10 +80,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -96,10 +96,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -221,21 +221,21 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (UDFToDouble(key) = 7.0) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -244,10 +244,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -260,10 +260,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -352,21 +352,21 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                   Filter Operator
                     predicate: ((UDFToDouble(key) + UDFToDouble(key)) >= 7.0) 
(type: boolean)
-                    Statistics: Num rows: 1 Data size: 100 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 100 Basic stats: 
COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1 Data size: 100 Basic stats: 
COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -375,14 +375,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE 
Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                 Select Operator
                   expressions: UDFToString((UDFToDouble(_col0) + 
UDFToDouble(_col0))) (type: string), _col1 (type: bigint)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE 
Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 200 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 600 Basic stats: 
COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -395,14 +395,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE 
Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                 Select Operator
                   expressions: UDFToString((UDFToDouble(_col0) + 
UDFToDouble(_col0))) (type: string), _col1 (type: bigint)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE 
Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 200 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 600 Basic stats: 
COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out 
b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
index e4329cf..5fa05a9 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
@@ -59,35 +59,35 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                   Select Operator
                     expressions: key (type: string), 1 (type: bigint)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 7 Data size: 700 Basic stats: 
COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 900 Basic stats: 
COMPLETE Column stats: NONE
                       table:
                           input format: 
org.apache.hadoop.mapred.TextInputFormat
                           output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -97,14 +97,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                   Select Operator
                     expressions: key (type: string), 2 (type: bigint)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 7 Data size: 700 Basic stats: 
COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 900 Basic stats: 
COMPLETE Column stats: NONE
                       table:
                           input format: 
org.apache.hadoop.mapred.TextInputFormat
                           output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -117,10 +117,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE 
Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 7 Data size: 700 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE 
Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out 
b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
index 76fae8c..7ffae80 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
@@ -56,22 +56,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -80,14 +80,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE 
Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                 Select Operator
                   expressions: _col1 (type: bigint), _col0 (type: string)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE 
Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 200 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 600 Basic stats: 
COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -100,14 +100,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE 
Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                 Select Operator
                   expressions: _col1 (type: bigint), _col0 (type: string)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE 
Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 200 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 600 Basic stats: 
COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out 
b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
index 98aa546..35bbe82 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
@@ -56,31 +56,31 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                     Group By Operator
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE 
Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE 
Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -92,10 +92,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE 
Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE 
Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/d0fa7d55/ql/src/test/results/clientpositive/spark/union_remove_22.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_22.q.out 
b/ql/src/test/results/clientpositive/spark/union_remove_22.q.out
index 2510966..1e07020 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_22.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_22.q.out
@@ -56,22 +56,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -80,14 +80,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE 
Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), 
_col1 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE 
Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 200 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 600 Basic stats: 
COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -100,14 +100,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE 
Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), 
_col1 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE 
Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 200 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 600 Basic stats: 
COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -231,22 +231,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 3 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 3 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 300 Basic stats: 
COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -255,14 +255,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE 
Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), UDFToLong(concat(_col1, 
_col1)) (type: bigint), UDFToLong(concat(_col1, _col1)) (type: bigint)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE 
Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 200 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 600 Basic stats: 
COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -275,14 +275,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE 
Column stats: NONE
+                Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE 
Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), UDFToLong(concat(_col1, 
_col1)) (type: bigint), UDFToLong(concat(_col1, _col1)) (type: bigint)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE 
Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE 
Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 200 Basic stats: 
COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 600 Basic stats: 
COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
