HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/23ac04d3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/23ac04d3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/23ac04d3

Branch: refs/heads/master
Commit: 23ac04d3b511cfc2239b39a30dcc72a173e3ee99
Parents: 1a1e835
Author: Pengcheng Xiong <pxi...@hortonworks.com>
Authored: Tue Apr 4 14:39:44 2017 -0700
Committer: Pengcheng Xiong <pxi...@hortonworks.com>
Committed: Tue Apr 4 14:39:53 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/optimizer/ColumnPruner.java  |  13 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   8 +-
 .../column_pruner_multiple_children.q           |  19 ++
 .../clientnegative/udf_assert_true.q.out        |  72 +++--
 .../clientpositive/add_part_multiple.q.out      |  16 +-
 .../alter_partition_coltype.q.out               |   2 +-
 .../clientpositive/annotate_stats_select.q.out  |   4 +-
 .../clientpositive/autoColumnStats_7.q.out      |   2 +-
 .../clientpositive/autoColumnStats_8.q.out      |   4 +-
 .../results/clientpositive/ba_table_udfs.q.out  |   2 +-
 .../clientpositive/bucket_map_join_spark1.q.out |   8 +-
 .../clientpositive/bucket_map_join_spark2.q.out |   8 +-
 .../clientpositive/bucket_map_join_spark3.q.out |   8 +-
 .../results/clientpositive/bucketmapjoin5.q.out |  12 +-
 .../clientpositive/bucketmapjoin_negative.q.out |   2 +-
 .../bucketmapjoin_negative2.q.out               |   2 +-
 .../bucketsortoptimize_insert_3.q.out           |   4 +-
 ql/src/test/results/clientpositive/cast1.q.out  |   6 +-
 ql/src/test/results/clientpositive/char_1.q.out |   8 +-
 .../column_pruner_multiple_children.q.out       | 189 ++++++++++++
 .../clientpositive/dynamic_rdd_cache.q.out      |   4 +-
 .../clientpositive/filter_join_breaktask2.q.out |  46 +--
 .../test/results/clientpositive/fold_case.q.out |   6 +-
 .../test/results/clientpositive/groupby12.q.out |   2 +-
 .../test/results/clientpositive/groupby5.q.out  |   2 +-
 .../clientpositive/groupby5_noskew.q.out        |   2 +-
 .../results/clientpositive/groupby7_map.q.out   |   4 +-
 .../groupby7_map_multi_single_reducer.q.out     |   4 +-
 .../clientpositive/groupby7_map_skew.q.out      |   4 +-
 .../clientpositive/groupby7_noskew.q.out        |   4 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |   4 +-
 .../test/results/clientpositive/groupby8.q.out  |   8 +-
 .../results/clientpositive/groupby8_map.q.out   |   4 +-
 .../clientpositive/groupby8_map_skew.q.out      |   4 +-
 .../clientpositive/groupby8_noskew.q.out        |   4 +-
 .../test/results/clientpositive/groupby9.q.out  |  20 +-
 .../clientpositive/groupby_cube_multi_gby.q.out |   2 +-
 .../clientpositive/groupby_position.q.out       |   8 +-
 .../clientpositive/infer_bucket_sort.q.out      |   6 +-
 .../infer_bucket_sort_grouping_operators.q.out  |  30 +-
 .../infer_bucket_sort_map_operators.q.out       |  53 ++--
 .../infer_bucket_sort_reducers_power_two.q.out  |   6 +-
 ql/src/test/results/clientpositive/input8.q.out |   4 +-
 ql/src/test/results/clientpositive/input9.q.out |   4 +-
 .../results/clientpositive/input_part10.q.out   |  24 +-
 ql/src/test/results/clientpositive/join38.q.out |  22 +-
 .../clientpositive/literal_decimal.q.out        |   6 +-
 .../clientpositive/llap/autoColumnStats_2.q.out |   4 +-
 .../clientpositive/llap/bucketmapjoin1.q.out    |  12 +-
 .../clientpositive/llap/bucketmapjoin2.q.out    |  18 +-
 .../clientpositive/llap/bucketmapjoin3.q.out    |  12 +-
 .../clientpositive/llap/bucketmapjoin4.q.out    |  12 +-
 .../clientpositive/llap/explainuser_2.q.out     | 296 ++++++++++---------
 .../llap/filter_join_breaktask2.q.out           |  46 +--
 .../llap/multi_insert_lateral_view.q.out        | 224 ++++++++------
 .../results/clientpositive/llap/stats11.q.out   |  12 +-
 .../llap/table_access_keys_stats.q.out          |   2 +-
 .../llap/tez_union_multiinsert.q.out            | 182 +++++++-----
 .../clientpositive/llap/unionDistinct_1.q.out   |  42 +--
 .../llap/vector_null_projection.q.out           |  31 +-
 .../clientpositive/llap/vector_nvl.q.out        |  18 +-
 .../llap/vectorization_short_regress.q.out      |   4 +-
 .../clientpositive/multi_insert_mixed.q.out     |   4 +-
 .../results/clientpositive/null_column.q.out    |   4 +-
 .../clientpositive/ppd_constant_expr.q.out      |   8 +-
 .../results/clientpositive/smb_mapjoin_20.q.out |   6 +-
 .../spark/add_part_multiple.q.out               |  16 +-
 .../spark/bucket_map_join_spark1.q.out          |   8 +-
 .../spark/bucket_map_join_spark2.q.out          |   8 +-
 .../spark/bucket_map_join_spark3.q.out          |   8 +-
 .../clientpositive/spark/bucketmapjoin1.q.out   |  12 +-
 .../clientpositive/spark/bucketmapjoin2.q.out   |  18 +-
 .../clientpositive/spark/bucketmapjoin3.q.out   |  12 +-
 .../clientpositive/spark/bucketmapjoin4.q.out   |  12 +-
 .../clientpositive/spark/bucketmapjoin5.q.out   |  12 +-
 .../spark/bucketmapjoin_negative.q.out          |   2 +-
 .../spark/bucketmapjoin_negative2.q.out         |   2 +-
 .../spark/dynamic_rdd_cache.q.out               |   4 +-
 .../spark/filter_join_breaktask2.q.out          |  46 +--
 .../results/clientpositive/spark/groupby5.q.out |   2 +-
 .../clientpositive/spark/groupby5_noskew.q.out  |   2 +-
 .../clientpositive/spark/groupby7_map.q.out     |   4 +-
 .../groupby7_map_multi_single_reducer.q.out     |   4 +-
 .../spark/groupby7_map_skew.q.out               |   4 +-
 .../clientpositive/spark/groupby7_noskew.q.out  |   4 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |   4 +-
 .../results/clientpositive/spark/groupby8.q.out |   8 +-
 .../clientpositive/spark/groupby8_map.q.out     |   4 +-
 .../spark/groupby8_map_skew.q.out               |   4 +-
 .../clientpositive/spark/groupby8_noskew.q.out  |   4 +-
 .../results/clientpositive/spark/groupby9.q.out |  20 +-
 .../clientpositive/spark/groupby_position.q.out |   8 +-
 .../spark/infer_bucket_sort_map_operators.q.out |  54 ++--
 .../results/clientpositive/spark/join38.q.out   |  22 +-
 .../spark/multi_insert_lateral_view.q.out       | 224 ++++++++------
 .../spark/multi_insert_mixed.q.out              |   4 +-
 .../clientpositive/spark/smb_mapjoin_20.q.out   |   6 +-
 .../spark/table_access_keys_stats.q.out         |   2 +-
 .../results/clientpositive/spark/union17.q.out  |  40 ++-
 .../results/clientpositive/spark/union19.q.out  |  20 +-
 .../clientpositive/spark/union_remove_19.q.out  |   4 +-
 .../spark/vectorization_short_regress.q.out     |   4 +-
 .../clientpositive/tez/explainanalyze_2.q.out   | 230 +++++++-------
 ql/src/test/results/clientpositive/udf1.q.out   |  32 +-
 ql/src/test/results/clientpositive/udf3.q.out   |  20 +-
 .../results/clientpositive/udtf_stack.q.out     |  18 +-
 .../test/results/clientpositive/union17.q.out   |  40 ++-
 .../test/results/clientpositive/union19.q.out   |  20 +-
 .../clientpositive/union_remove_19.q.out        |   4 +-
 .../test/results/clientpositive/varchar_1.q.out |   4 +-
 .../clientpositive/vector_null_projection.q.out |  30 +-
 .../results/clientpositive/vector_nvl.q.out     |  18 +-
 112 files changed, 1562 insertions(+), 1079 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java
index 7e39d77..d59603e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java
@@ -158,14 +158,15 @@ public class ColumnPruner extends Transform {
       boolean walkChildren = true;
       opStack.push(nd);
 
-      // no need to go further down for a select op with a file sink or script
-      // child
-      // since all cols are needed for these ops
+      // no need to go further down for a select op whose children are all
+      // file sink or script operators, since all cols are needed for these ops.
+      // However, if any child is not a file sink or script, we still go down.
       if (nd instanceof SelectOperator) {
+        walkChildren = false;
         for (Node child : nd.getChildren()) {
-          if ((child instanceof FileSinkOperator)
-              || (child instanceof ScriptOperator)) {
-            walkChildren = false;
+          if (!(child instanceof FileSinkOperator || child instanceof ScriptOperator)) {
+            walkChildren = true;
+            break;
           }
         }
       }
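
The effect of this ColumnPruner change: the walk below a SelectOperator is now skipped only when every child is a FileSinkOperator or ScriptOperator; if the select has any other child (for example the extra Select that column-stats autogathering appends next to the file sink), pruning continues into it. A minimal, self-contained sketch of the old versus new decision follows; WalkDecision and the Op classes are stand-ins for illustration, not Hive's real Node/Operator types.

    import java.util.List;

    final class WalkDecision {
      // Stand-ins for the Hive operator types referenced in the patch.
      interface Op {}
      static final class FileSinkOp implements Op {}
      static final class ScriptOp implements Op {}
      static final class SelectOp implements Op {}

      // Old rule: stop walking as soon as ANY child is a file sink or script.
      static boolean walkChildrenOld(List<Op> children) {
        boolean walk = true;
        for (Op child : children) {
          if (child instanceof FileSinkOp || child instanceof ScriptOp) {
            walk = false;
          }
        }
        return walk;
      }

      // New rule: stop only when ALL children are file sinks or scripts.
      static boolean walkChildrenNew(List<Op> children) {
        for (Op child : children) {
          if (!(child instanceof FileSinkOp || child instanceof ScriptOp)) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) {
        // A select with a file sink child AND a second (stats) select child.
        List<Op> kids = List.of(new FileSinkOp(), new SelectOp());
        System.out.println(walkChildrenOld(kids)); // false: pruner stopped too early
        System.out.println(walkChildrenNew(kids)); // true: pruner keeps walking
      }
    }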

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index b2e1c88..bdc5bef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -7446,11 +7446,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
             rowFields.get(rowFieldsOffset).getInternalName(), "", false,
             rowFields.get(rowFieldsOffset).isSkewedCol());
         // LazySimpleSerDe can convert any types to String type using
-        // JSON-format.
-        if (!tableFieldTypeInfo.equals(rowFieldTypeInfo)
-            && !(isLazySimpleSerDe
-                && tableFieldTypeInfo.getCategory().equals(Category.PRIMITIVE) && tableFieldTypeInfo
-                  .equals(TypeInfoFactory.stringTypeInfo))) {
+        // JSON-format. However, more operators may be appended after this
+        // select, so we keep the explicit conversion for any type mismatch.
+        if (!tableFieldTypeInfo.equals(rowFieldTypeInfo)) {
           // need to do some conversions here
           converted = true;
           if (tableFieldTypeInfo.getCategory() != Category.PRIMITIVE) {
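
A consequence of dropping the isLazySimpleSerDe special case above: any mismatch between the table column type and the select-expression type now triggers an explicit conversion, which is why many of the .q.out plans below gain UDFToString(...) casts and several lineage entries flip from SIMPLE to EXPRESSION. A simplified, self-contained sketch of the old versus new condition follows; plain strings stand in for Hive's TypeInfo objects, and the old rule's primitive-category check is folded into the string comparison, so this is illustrative only.

    final class ConversionDecision {
      // Old rule: skip the cast when LazySimpleSerDe could coerce the value
      // into a string table column by itself.
      static boolean needsConversionOld(String tableType, String rowType, boolean lazySimpleSerDe) {
        return !tableType.equals(rowType)
            && !(lazySimpleSerDe && tableType.equals("string"));
      }

      // New rule: always convert on any type mismatch.
      static boolean needsConversionNew(String tableType, String rowType) {
        return !tableType.equals(rowType);
      }

      public static void main(String[] args) {
        // An int expression written into a string column through LazySimpleSerDe.
        System.out.println(needsConversionOld("string", "int", true)); // false: no cast was planned
        System.out.println(needsConversionNew("string", "int"));       // true: a cast such as UDFToString is inserted
      }
    }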

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/queries/clientpositive/column_pruner_multiple_children.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/column_pruner_multiple_children.q b/ql/src/test/queries/clientpositive/column_pruner_multiple_children.q
new file mode 100644
index 0000000..9315239
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/column_pruner_multiple_children.q
@@ -0,0 +1,19 @@
+set hive.map.aggr=false;
+set hive.stats.column.autogather=true;
+
+CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
+
+create table s as select * from src where key='10';
+
+explain FROM S
+INSERT OVERWRITE TABLE DEST1 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
+;
+
+FROM S
+INSERT OVERWRITE TABLE DEST1 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
+;
+
+desc formatted DEST1;
+
+desc formatted DEST1 key;
+desc formatted DEST1 value;

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientnegative/udf_assert_true.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/udf_assert_true.q.out 
b/ql/src/test/results/clientnegative/udf_assert_true.q.out
index 7fc50d6..a55e9bf 100644
--- a/ql/src/test/results/clientnegative/udf_assert_true.q.out
+++ b/ql/src/test/results/clientnegative/udf_assert_true.q.out
@@ -32,13 +32,17 @@ STAGE PLANS:
                     Limit
                       Number of rows: 2
                       Statistics: Num rows: 2 Data size: 8 Basic stats: 
COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 2 Data size: 8 Basic stats: 
COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      Select Operator
+                        expressions: UDFToString(_col0) (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 2 Data size: 168 Basic stats: 
COMPLETE Column stats: COMPLETE
+                        File Output Operator
+                          compressed: false
+                          Statistics: Num rows: 2 Data size: 168 Basic stats: 
COMPLETE Column stats: COMPLETE
+                          table:
+                              input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                              output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                              serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               Select Operator
                 expressions: array(1,2) (type: array<int>)
                 outputColumnNames: _col0
@@ -56,13 +60,17 @@ STAGE PLANS:
                       Limit
                         Number of rows: 2
                         Statistics: Num rows: 2 Data size: 8 Basic stats: 
COMPLETE Column stats: COMPLETE
-                        File Output Operator
-                          compressed: false
-                          Statistics: Num rows: 2 Data size: 8 Basic stats: 
COMPLETE Column stats: COMPLETE
-                          table:
-                              input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                              output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                              serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        Select Operator
+                          expressions: UDFToString(_col0) (type: string)
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 2 Data size: 168 Basic stats: 
COMPLETE Column stats: COMPLETE
+                          File Output Operator
+                            compressed: false
+                            Statistics: Num rows: 2 Data size: 168 Basic 
stats: COMPLETE Column stats: COMPLETE
+                            table:
+                                input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                                output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                                serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -109,13 +117,17 @@ STAGE PLANS:
                     Limit
                       Number of rows: 2
                       Statistics: Num rows: 2 Data size: 8 Basic stats: 
COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 2 Data size: 8 Basic stats: 
COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      Select Operator
+                        expressions: UDFToString(_col0) (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 2 Data size: 168 Basic stats: 
COMPLETE Column stats: COMPLETE
+                        File Output Operator
+                          compressed: false
+                          Statistics: Num rows: 2 Data size: 168 Basic stats: 
COMPLETE Column stats: COMPLETE
+                          table:
+                              input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                              output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                              serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               Select Operator
                 expressions: array(1,2) (type: array<int>)
                 outputColumnNames: _col0
@@ -133,13 +145,17 @@ STAGE PLANS:
                       Limit
                         Number of rows: 2
                         Statistics: Num rows: 2 Data size: 8 Basic stats: 
COMPLETE Column stats: COMPLETE
-                        File Output Operator
-                          compressed: false
-                          Statistics: Num rows: 2 Data size: 8 Basic stats: 
COMPLETE Column stats: COMPLETE
-                          table:
-                              input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                              output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                              serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        Select Operator
+                          expressions: UDFToString(_col0) (type: string)
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 2 Data size: 168 Basic stats: 
COMPLETE Column stats: COMPLETE
+                          File Output Operator
+                            compressed: false
+                            Statistics: Num rows: 2 Data size: 168 Basic 
stats: COMPLETE Column stats: COMPLETE
+                            table:
+                                input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                                output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                                serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/add_part_multiple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/add_part_multiple.q.out 
b/ql/src/test/results/clientpositive/add_part_multiple.q.out
index c452223..1e01b26 100644
--- a/ql/src/test/results/clientpositive/add_part_multiple.q.out
+++ b/ql/src/test/results/clientpositive/add_part_multiple.q.out
@@ -71,14 +71,14 @@ POSTHOOK: Output: default@add_part_test@ds=2010-01-01
 POSTHOOK: Output: default@add_part_test@ds=2010-02-01
 POSTHOOK: Output: default@add_part_test@ds=2010-03-01
 POSTHOOK: Output: default@add_part_test@ds=2010-04-01
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-01-01).key SIMPLE []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-01-01).value SIMPLE []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-02-01).key SIMPLE []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-02-01).value SIMPLE []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-03-01).key SIMPLE []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-03-01).value SIMPLE []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-04-01).key SIMPLE []
-POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-04-01).value SIMPLE []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-01-01).key EXPRESSION []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-01-01).value EXPRESSION []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-02-01).key EXPRESSION []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-02-01).value EXPRESSION []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-03-01).key EXPRESSION []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-03-01).value EXPRESSION []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-04-01).key EXPRESSION []
+POSTHOOK: Lineage: add_part_test PARTITION(ds=2010-04-01).value EXPRESSION []
 PREHOOK: query: select * from add_part_test
 PREHOOK: type: QUERY
 PREHOOK: Input: default@add_part_test

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out 
b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
index daa6255..426a4de 100644
--- a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
+++ b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
@@ -442,7 +442,7 @@ POSTHOOK: query: insert into table 
pt.alterdynamic_part_table partition(partcol1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: pt@alterdynamic_part_table@partcol1=1/partcol2=1
-POSTHOOK: Lineage: alterdynamic_part_table 
PARTITION(partcol1=1,partcol2=1).intcol SIMPLE []
+POSTHOOK: Lineage: alterdynamic_part_table 
PARTITION(partcol1=1,partcol2=1).intcol EXPRESSION []
 PREHOOK: query: alter table pt.alterdynamic_part_table partition column 
(partcol1 int)
 PREHOOK: type: ALTERTABLE_PARTCOLTYPE
 PREHOOK: Input: pt@alterdynamic_part_table

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/annotate_stats_select.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_select.q.out 
b/ql/src/test/results/clientpositive/annotate_stats_select.q.out
index 873f1ab..73e0910 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_select.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_select.q.out
@@ -260,9 +260,9 @@ STAGE PLANS:
           alias: alltypes_orc
           Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column 
stats: COMPLETE
           Select Operator
-            expressions: null (type: void)
+            expressions: null (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column 
stats: COMPLETE
+            Statistics: Num rows: 2 Data size: 84 Basic stats: COMPLETE Column 
stats: COMPLETE
             ListSink
 
 PREHOOK: query: explain select 11 from alltypes_orc

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/autoColumnStats_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_7.q.out 
b/ql/src/test/results/clientpositive/autoColumnStats_7.q.out
index 82768d1..9d24bc5 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_7.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_7.q.out
@@ -129,7 +129,7 @@ STAGE PLANS:
               sort order: 
               Map-reduce partition columns: rand() (type: double)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
-              value expressions: key (type: string), 16 (type: int), c1 (type: 
int), c2 (type: string)
+              value expressions: key (type: string), c1 (type: int), c2 (type: 
string)
       Reduce Operator Tree:
         Group By Operator
           aggregations: compute_stats(VALUE._col0, 16), 
compute_stats(VALUE._col2, 16), compute_stats(VALUE._col3, 16)

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/autoColumnStats_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_8.q.out 
b/ql/src/test/results/clientpositive/autoColumnStats_8.q.out
index c6f6127..329a897 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_8.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_8.q.out
@@ -157,8 +157,8 @@ STAGE PLANS:
                   GatherStats: true
                   MultiFileSpray: false
                 Select Operator
-                  expressions: _col0 (type: string), _col1 (type: string), 
'2008-12-31' (type: string), _col2 (type: string)
-                  outputColumnNames: key, value, ds, hr
+                  expressions: _col2 (type: string), _col0 (type: string), 
_col1 (type: string)
+                  outputColumnNames: hr, key, value
                   Statistics: Num rows: 666 Data size: 7075 Basic stats: 
COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: compute_stats(key, 16), compute_stats(value, 
16)

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/ba_table_udfs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ba_table_udfs.q.out 
b/ql/src/test/results/clientpositive/ba_table_udfs.q.out
index 1682631..0a48b09 100644
--- a/ql/src/test/results/clientpositive/ba_table_udfs.q.out
+++ b/ql/src/test/results/clientpositive/ba_table_udfs.q.out
@@ -49,7 +49,7 @@ POSTHOOK: Input: default@dest1
 POSTHOOK: Output: default@dest1
 POSTHOOK: Lineage: dest1.bytes1 EXPRESSION []
 POSTHOOK: Lineage: dest1.bytes2 EXPRESSION []
-POSTHOOK: Lineage: dest1.string SIMPLE []
+POSTHOOK: Lineage: dest1.string EXPRESSION []
 PREHOOK: query: SELECT
   bytes1,
   bytes2,

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out 
b/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
index ba79294..5478f12 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
@@ -222,7 +222,7 @@ STAGE PLANS:
                   Position of Big Table: 1
                   Statistics: Num rows: 60 Data size: 6393 Basic stats: 
COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col0 (type: int), _col1 (type: string), 
_col3 (type: string)
+                    expressions: UDFToString(_col0) (type: string), _col1 
(type: string), _col3 (type: string)
                     outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 60 Data size: 6393 Basic stats: 
COMPLETE Column stats: NONE
                     File Output Operator
@@ -413,7 +413,7 @@ POSTHOOK: Input: 
default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE 
[(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION 
[(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 
[(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE 
[(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -556,7 +556,7 @@ STAGE PLANS:
                   Position of Big Table: 1
                   Statistics: Num rows: 60 Data size: 6393 Basic stats: 
COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col0 (type: int), _col1 (type: string), 
_col3 (type: string)
+                    expressions: UDFToString(_col0) (type: string), _col1 
(type: string), _col3 (type: string)
                     outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 60 Data size: 6393 Basic stats: 
COMPLETE Column stats: NONE
                     File Output Operator
@@ -747,7 +747,7 @@ POSTHOOK: Input: 
default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE 
[(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION 
[(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 
[(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE 
[(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out 
b/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
index 8974d4c..e335683 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
@@ -206,7 +206,7 @@ STAGE PLANS:
                   Position of Big Table: 0
                   Statistics: Num rows: 60 Data size: 6393 Basic stats: 
COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col0 (type: int), _col1 (type: string), 
_col3 (type: string)
+                    expressions: UDFToString(_col0) (type: string), _col1 
(type: string), _col3 (type: string)
                     outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 60 Data size: 6393 Basic stats: 
COMPLETE Column stats: NONE
                     File Output Operator
@@ -397,7 +397,7 @@ POSTHOOK: Input: 
default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE 
[(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION 
[(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 
[(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE 
[(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -540,7 +540,7 @@ STAGE PLANS:
                   Position of Big Table: 0
                   Statistics: Num rows: 60 Data size: 6393 Basic stats: 
COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col0 (type: int), _col1 (type: string), 
_col3 (type: string)
+                    expressions: UDFToString(_col0) (type: string), _col1 
(type: string), _col3 (type: string)
                     outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 60 Data size: 6393 Basic stats: 
COMPLETE Column stats: NONE
                     File Output Operator
@@ -731,7 +731,7 @@ POSTHOOK: Input: 
default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE 
[(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION 
[(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 
[(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE 
[(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out 
b/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
index 0453f99..bd5a438 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
@@ -206,7 +206,7 @@ STAGE PLANS:
                   Position of Big Table: 1
                   Statistics: Num rows: 60 Data size: 6393 Basic stats: 
COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col0 (type: int), _col1 (type: string), 
_col3 (type: string)
+                    expressions: UDFToString(_col0) (type: string), _col1 
(type: string), _col3 (type: string)
                     outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 60 Data size: 6393 Basic stats: 
COMPLETE Column stats: NONE
                     File Output Operator
@@ -397,7 +397,7 @@ POSTHOOK: Input: 
default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE 
[(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION 
[(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 
[(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE 
[(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -540,7 +540,7 @@ STAGE PLANS:
                   Position of Big Table: 1
                   Statistics: Num rows: 60 Data size: 6393 Basic stats: 
COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col0 (type: int), _col1 (type: string), 
_col3 (type: string)
+                    expressions: UDFToString(_col0) (type: string), _col1 
(type: string), _col3 (type: string)
                     outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 60 Data size: 6393 Basic stats: 
COMPLETE Column stats: NONE
                     File Output Operator
@@ -731,7 +731,7 @@ POSTHOOK: Input: 
default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE 
[(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION 
[(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 
[(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE 
[(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin5.q.out 
b/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
index b108607..febc2b6 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
@@ -237,7 +237,7 @@ STAGE PLANS:
                 Statistics: Num rows: 121 Data size: 12786 Basic stats: 
COMPLETE Column stats: NONE
                 BucketMapJoin: true
                 Select Operator
-                  expressions: _col0 (type: int), _col1 (type: string), _col6 
(type: string)
+                  expressions: UDFToString(_col0) (type: string), _col1 (type: 
string), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 121 Data size: 12786 Basic stats: 
COMPLETE Column stats: NONE
                   File Output Operator
@@ -618,7 +618,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part
 POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-09
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE 
[(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION 
[(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 
[(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE 
[(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -663,7 +663,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part
 POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-09
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE 
[(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION 
[(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 
[(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE 
[(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -778,7 +778,7 @@ STAGE PLANS:
                 Statistics: Num rows: 63 Data size: 6736 Basic stats: COMPLETE 
Column stats: NONE
                 BucketMapJoin: true
                 Select Operator
-                  expressions: _col0 (type: int), _col1 (type: string), _col6 
(type: string)
+                  expressions: UDFToString(_col0) (type: string), _col1 (type: 
string), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 63 Data size: 6736 Basic stats: 
COMPLETE Column stats: NONE
                   File Output Operator
@@ -1159,7 +1159,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-09
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE 
[(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION 
[(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 
[(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE 
[(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -1204,7 +1204,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-09
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE 
[(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION 
[(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 
[(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE 
[(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out 
b/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out
index 4aa7f82..ee075c1 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out
@@ -177,7 +177,7 @@ STAGE PLANS:
                 Position of Big Table: 0
                 Statistics: Num rows: 44 Data size: 4620 Basic stats: COMPLETE 
Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: int), _col1 (type: string), _col6 
(type: string)
+                  expressions: UDFToString(_col0) (type: string), _col1 (type: 
string), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 44 Data size: 4620 Basic stats: 
COMPLETE Column stats: NONE
                   File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out 
b/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out
index 09b5af9..2728bb7 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out
@@ -240,7 +240,7 @@ STAGE PLANS:
                 Statistics: Num rows: 63 Data size: 6736 Basic stats: COMPLETE 
Column stats: NONE
                 BucketMapJoin: true
                 Select Operator
-                  expressions: _col0 (type: int), _col1 (type: string), _col6 
(type: string)
+                  expressions: UDFToString(_col0) (type: string), _col1 (type: 
string), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 63 Data size: 6736 Basic stats: 
COMPLETE Column stats: NONE
                   File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
----------------------------------------------------------------------
diff --git 
a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out 
b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
index 11c7c39..c5e03be 100644
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
+++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
@@ -171,7 +171,7 @@ STAGE PLANS:
                 value expressions: _col0 (type: int), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: VALUE._col0 (type: int), UDFToInteger(VALUE._col1) 
(type: int)
+          expressions: UDFToString(VALUE._col0) (type: string), 
UDFToInteger(VALUE._col1) (type: int)
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
           File Output Operator
@@ -213,7 +213,7 @@ POSTHOOK: Input: default@test_table1
 POSTHOOK: Input: default@test_table1@ds=1
 POSTHOOK: Output: default@test_table2@ds=1
 POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION 
[(test_table1)a.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE 
[(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value EXPRESSION 
[(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select count(*) from test_table2 where ds = '1'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test_table2

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/cast1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cast1.q.out 
b/ql/src/test/results/clientpositive/cast1.q.out
index 1a246c0..9feb14f 100644
--- a/ql/src/test/results/clientpositive/cast1.q.out
+++ b/ql/src/test/results/clientpositive/cast1.q.out
@@ -33,7 +33,7 @@ STAGE PLANS:
               predicate: (UDFToDouble(key) = 86.0) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
               Select Operator
-                expressions: 5 (type: int), 5.0 (type: double), 5.0 (type: 
double), 5.0 (type: double), 5 (type: int), true (type: boolean), 1 (type: int)
+                expressions: 5 (type: int), 5.0 (type: double), 5.0 (type: 
double), 5.0 (type: double), 5 (type: int), 'TRUE' (type: string), 1 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
                 File Output Operator
@@ -110,7 +110,7 @@ POSTHOOK: Lineage: dest1.c2 EXPRESSION []
 POSTHOOK: Lineage: dest1.c3 EXPRESSION []
 POSTHOOK: Lineage: dest1.c4 EXPRESSION []
 POSTHOOK: Lineage: dest1.c5 SIMPLE []
-POSTHOOK: Lineage: dest1.c6 SIMPLE []
+POSTHOOK: Lineage: dest1.c6 EXPRESSION []
 POSTHOOK: Lineage: dest1.c7 SIMPLE []
 PREHOOK: query: select dest1.* FROM dest1
 PREHOOK: type: QUERY
@@ -120,4 +120,4 @@ POSTHOOK: query: select dest1.* FROM dest1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest1
 #### A masked pattern was here ####
-5      5.0     5.0     5.0     5       true    1
+5      5.0     5.0     5.0     5       TRUE    1

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/char_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/char_1.q.out 
b/ql/src/test/results/clientpositive/char_1.q.out
index 2f155a8..06fbe67 100644
--- a/ql/src/test/results/clientpositive/char_1.q.out
+++ b/ql/src/test/results/clientpositive/char_1.q.out
@@ -94,8 +94,8 @@ POSTHOOK: query: insert overwrite table char1_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@char1
 POSTHOOK: Output: default@char1_1
-POSTHOOK: Lineage: char1_1.key SIMPLE [(char1)char1.FieldSchema(name:key, 
type:char(10), comment:null), ]
-POSTHOOK: Lineage: char1_1.value SIMPLE [(char1)char1.FieldSchema(name:value, 
type:char(20), comment:null), ]
+POSTHOOK: Lineage: char1_1.key EXPRESSION [(char1)char1.FieldSchema(name:key, 
type:char(10), comment:null), ]
+POSTHOOK: Lineage: char1_1.value EXPRESSION 
[(char1)char1.FieldSchema(name:value, type:char(20), comment:null), ]
 PREHOOK: query: select key, value from char1_1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@char1_1
@@ -104,8 +104,8 @@ POSTHOOK: query: select key, value from char1_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@char1_1
 #### A masked pattern was here ####
-0              val_0               
-0              val_0               
+0      val_0
+0      val_0
 PREHOOK: query: insert overwrite table char1 
   select key, cast(value as char(3)) from src order by key, value limit 2
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out b/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out
new file mode 100644
index 0000000..96feeed
--- /dev/null
+++ b/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out
@@ -0,0 +1,189 @@
+PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@DEST1
+POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@DEST1
+PREHOOK: query: create table s as select * from src where key='10'
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@s
+POSTHOOK: query: create table s as select * from src where key='10'
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@s
+POSTHOOK: Lineage: s.key SIMPLE []
+POSTHOOK: Lineage: s.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
+PREHOOK: query: explain FROM S
+INSERT OVERWRITE TABLE DEST1 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain FROM S
+INSERT OVERWRITE TABLE DEST1 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+  Stage-4 depends on stages: Stage-2, Stage-3
+  Stage-3 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: s
+            Statistics: Num rows: 1 Data size: 9 Basic stats: COMPLETE Column 
stats: NONE
+            Select Operator
+              expressions: key (type: string), substr(value, 5) (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1 Data size: 9 Basic stats: COMPLETE 
Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 1 Data size: 9 Basic stats: COMPLETE 
Column stats: NONE
+                value expressions: _col1 (type: string)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: sum(VALUE._col0)
+          keys: KEY._col0 (type: string)
+          mode: complete
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1 Data size: 9 Basic stats: COMPLETE Column 
stats: NONE
+          Select Operator
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) 
(type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 9 Basic stats: COMPLETE Column 
stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1 Data size: 9 Basic stats: COMPLETE 
Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.dest1
+            Select Operator
+              expressions: _col0 (type: int), _col1 (type: string)
+              outputColumnNames: key, value
+              Statistics: Num rows: 1 Data size: 9 Basic stats: COMPLETE 
Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+  Stage: Stage-4
+    Column Stats Work
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: int, string
+          Table: default.dest1
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 9 Basic stats: COMPLETE 
Column stats: NONE
+              value expressions: key (type: int), value (type: string)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0, 16), 
compute_stats(VALUE._col2, 16)
+          mode: complete
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1 Data size: 960 Basic stats: COMPLETE Column 
stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 960 Basic stats: COMPLETE 
Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+PREHOOK: query: FROM S
+INSERT OVERWRITE TABLE DEST1 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@s
+PREHOOK: Output: default@dest1
+POSTHOOK: query: FROM S
+INSERT OVERWRITE TABLE DEST1 SELECT key, sum(SUBSTR(value,5)) GROUP BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@s
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.key EXPRESSION [(s)s.FieldSchema(name:key, 
type:string, comment:null), ]
+POSTHOOK: Lineage: dest1.value EXPRESSION [(s)s.FieldSchema(name:value, 
type:string, comment:null), ]
+PREHOOK: query: desc formatted DEST1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@dest1
+POSTHOOK: query: desc formatted DEST1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@dest1
+# col_name             data_type               comment             
+                
+key                    int                                         
+value                  string                                      
+                
+# Detailed Table Information            
+Database:              default                  
+#### A masked pattern was here ####
+Retention:             0                        
+#### A masked pattern was here ####
+Table Type:            MANAGED_TABLE            
+Table Parameters:               
+       COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+       numFiles                1                   
+       numRows                 1                   
+       rawDataSize             7                   
+       totalSize               8                   
+#### A masked pattern was here ####
+                
+# Storage Information           
+SerDe Library:         org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe      
 
+InputFormat:           org.apache.hadoop.mapred.TextInputFormat         
+OutputFormat:          
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat       
+Compressed:            No                       
+Num Buckets:           -1                       
+Bucket Columns:        []                       
+Sort Columns:          []                       
+Storage Desc Params:            
+       serialization.format    1                   
+PREHOOK: query: desc formatted DEST1 key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@dest1
+POSTHOOK: query: desc formatted DEST1 key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@dest1
+# col_name             data_type               min                     max                     num_nulls               distinct_count          avg_col_len             max_col_len             num_trues               num_falses              comment
+
+key                    int                     10                      10                      0                       1                                                                                                                                               from deserializer
+COLUMN_STATS_ACCURATE  {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+PREHOOK: query: desc formatted DEST1 value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@dest1
+POSTHOOK: query: desc formatted DEST1 value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@dest1
+# col_name             data_type               min                     max                     num_nulls               distinct_count          avg_col_len             max_col_len             num_trues               num_falses              comment
+
+value                  string                                                                  0                       1                       4.0                     4                                                                       from deserializer
+COLUMN_STATS_ACCURATE  {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out b/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
index 87166a7..cf31ec1 100644
--- a/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
+++ b/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
@@ -271,7 +271,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -314,7 +314,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out b/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out
index 6a2396e..26ce0ed 100644
--- a/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out
+++ b/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out
@@ -44,9 +44,9 @@ POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE []
 POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE []
 POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE []
 POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE []
-POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 SIMPLE []
-POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 SIMPLE []
-POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 EXPRESSION []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 EXPRESSION []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 EXPRESSION []
 PREHOOK: query: insert overwrite table T2 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
@@ -64,9 +64,9 @@ POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c13 SIMPLE []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c14 SIMPLE []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c15 SIMPLE []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c16 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 EXPRESSION []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c18 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 EXPRESSION []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c2 SIMPLE []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c20 SIMPLE []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c21 SIMPLE []
@@ -74,9 +74,9 @@ POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c22 SIMPLE []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c23 SIMPLE []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c24 SIMPLE []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c25 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 EXPRESSION []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c4 SIMPLE []
-POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 EXPRESSION []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c6 SIMPLE []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c7 SIMPLE []
 POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c8 SIMPLE []
@@ -118,18 +118,18 @@ POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c2 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c20 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c21 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c22 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c23 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c24 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c25 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c26 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c27 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c28 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c23 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c24 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c25 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c26 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c27 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c28 EXPRESSION []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c29 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c3 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c30 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c31 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c32 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c33 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c33 EXPRESSION []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c34 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c35 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c36 SIMPLE []
@@ -157,18 +157,18 @@ POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c55 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c56 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c57 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c58 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c59 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c59 EXPRESSION []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c6 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c60 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c61 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c62 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c63 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c64 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c60 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c61 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c62 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c63 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c64 EXPRESSION []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c65 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c66 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c66 EXPRESSION []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c67 EXPRESSION []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c68 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c69 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c69 EXPRESSION []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c7 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c70 EXPRESSION []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c71 EXPRESSION []
@@ -181,7 +181,7 @@ POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c77 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c78 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c79 SIMPLE []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c8 SIMPLE []
-POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c80 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c80 EXPRESSION []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c81 EXPRESSION []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c82 EXPRESSION []
 POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c83 EXPRESSION []

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/fold_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fold_case.q.out b/ql/src/test/results/clientpositive/fold_case.q.out
index b2f9807..0aa5b9a 100644
--- a/ql/src/test/results/clientpositive/fold_case.q.out
+++ b/ql/src/test/results/clientpositive/fold_case.q.out
@@ -381,12 +381,12 @@ STAGE PLANS:
                 predicate: false (type: boolean)
                 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
-                  expressions: null (type: void)
+                  expressions: null (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/groupby12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby12.q.out b/ql/src/test/results/clientpositive/groupby12.q.out
index b17da54..921fc92 100644
--- a/ql/src/test/results/clientpositive/groupby12.q.out
+++ b/ql/src/test/results/clientpositive/groupby12.q.out
@@ -43,7 +43,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col1) (type: int), _col2 (type: bigint)
+            expressions: UDFToInteger(_col1) (type: int), UDFToString(_col2) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/groupby5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby5.q.out b/ql/src/test/results/clientpositive/groupby5.q.out
index d6efd2c..9bf01ee 100644
--- a/ql/src/test/results/clientpositive/groupby5.q.out
+++ b/ql/src/test/results/clientpositive/groupby5.q.out
@@ -73,7 +73,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/groupby5_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby5_noskew.q.out b/ql/src/test/results/clientpositive/groupby5_noskew.q.out
index 3c1688c..612a0f6 100644
--- a/ql/src/test/results/clientpositive/groupby5_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby5_noskew.q.out
@@ -48,7 +48,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/groupby7_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby7_map.q.out b/ql/src/test/results/clientpositive/groupby7_map.q.out
index 29e3113..0ef29cd 100644
--- a/ql/src/test/results/clientpositive/groupby7_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_map.q.out
@@ -79,7 +79,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -122,7 +122,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out
index 377f275..7c3b033 100644
--- a/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out
@@ -58,7 +58,7 @@ STAGE PLANS:
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+              expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
@@ -76,7 +76,7 @@ STAGE PLANS:
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+              expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/groupby7_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby7_map_skew.q.out b/ql/src/test/results/clientpositive/groupby7_map_skew.q.out
index bd0f347..4bfa52e 100644
--- a/ql/src/test/results/clientpositive/groupby7_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_map_skew.q.out
@@ -105,7 +105,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -172,7 +172,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/groupby7_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby7_noskew.q.out b/ql/src/test/results/clientpositive/groupby7_noskew.q.out
index 71321f4..6178f58 100644
--- a/ql/src/test/results/clientpositive/groupby7_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_noskew.q.out
@@ -67,7 +67,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -110,7 +110,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out
index 8181d8b..f38c428 100644
--- a/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out
@@ -97,7 +97,7 @@ STAGE PLANS:
             Number of rows: 10
             Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+              expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
@@ -141,7 +141,7 @@ STAGE PLANS:
             Number of rows: 10
             Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+              expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/groupby8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby8.q.out b/ql/src/test/results/clientpositive/groupby8.q.out
index abf7256..1856a92 100644
--- a/ql/src/test/results/clientpositive/groupby8.q.out
+++ b/ql/src/test/results/clientpositive/groupby8.q.out
@@ -92,7 +92,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -158,7 +158,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -913,7 +913,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -979,7 +979,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/groupby8_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby8_map.q.out b/ql/src/test/results/clientpositive/groupby8_map.q.out
index 48b8b34..f683a8b 100644
--- a/ql/src/test/results/clientpositive/groupby8_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby8_map.q.out
@@ -57,7 +57,7 @@ STAGE PLANS:
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+              expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
@@ -75,7 +75,7 @@ STAGE PLANS:
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+              expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/groupby8_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby8_map_skew.q.out b/ql/src/test/results/clientpositive/groupby8_map_skew.q.out
index 3b63a40..5e60d3e 100644
--- a/ql/src/test/results/clientpositive/groupby8_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby8_map_skew.q.out
@@ -104,7 +104,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -170,7 +170,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+            expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/groupby8_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby8_noskew.q.out b/ql/src/test/results/clientpositive/groupby8_noskew.q.out
index 48b8b34..f683a8b 100644
--- a/ql/src/test/results/clientpositive/groupby8_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby8_noskew.q.out
@@ -57,7 +57,7 @@ STAGE PLANS:
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+              expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
@@ -75,7 +75,7 @@ STAGE PLANS:
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+              expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
