http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/values.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/values.test b/testdata/workloads/functional-planner/queries/PlannerTest/values.test
index 87e9aa8..ffa5632 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/values.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/values.test
@@ -4,11 +4,13 @@ PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=1
+   row-size=18B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=1
+   row-size=18B cardinality=1
 ====
 values(1+1, 2, 5.0, 'a') order by 1 limit 10
 ---- PLAN
@@ -16,17 +18,21 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: 2 ASC
+|  row-size=18B cardinality=1
 |
 00:UNION
    constant-operands=1
+   row-size=18B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: 2 ASC
+|  row-size=18B cardinality=1
 |
 00:UNION
    constant-operands=1
+   row-size=18B cardinality=1
 ====
 values((1+1, 2, 5.0, 'a'), (2, 3, 6.0, 'b'), (3, 4, 7.0, 'c'))
 ---- PLAN
@@ -34,11 +40,13 @@ PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=3
+   row-size=18B cardinality=3
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=3
+   row-size=18B cardinality=3
 ====
 values((1+1, 2, 5.0, 'a'), (2, 3, 6.0, 'b'), (3, 4, 7.0, 'c')) order by 1 limit 10
 ---- PLAN
@@ -46,15 +54,19 @@ PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: 2 ASC
+|  row-size=18B cardinality=3
 |
 00:UNION
    constant-operands=3
+   row-size=18B cardinality=3
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:TOP-N [LIMIT=10]
 |  order by: 2 ASC
+|  row-size=18B cardinality=3
 |
 00:UNION
    constant-operands=3
+   row-size=18B cardinality=3
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/views.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/views.test b/testdata/workloads/functional-planner/queries/PlannerTest/views.test
index 5caeab5..0057e84 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/views.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/views.test
@@ -5,6 +5,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -12,6 +13,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # Basic test with a complex view.
 select * from functional.complex_view
@@ -20,23 +22,28 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=100]
 |  order by: b.string_col ASC
+|  row-size=21B cardinality=0
 |
 03:AGGREGATE [FINALIZE]
 |  output: count(a.bigint_col)
 |  group by: b.string_col
 |  having: count(a.bigint_col) > 1
+|  row-size=21B cardinality=0
 |
 02:HASH JOIN [INNER JOIN]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=29B cardinality=1
 |
 |--01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=17B cardinality=8
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    predicates: a.bigint_col < 50
    runtime filters: RF000 -> a.id
+   row-size=12B cardinality=1.10K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -46,31 +53,37 @@ PLAN-ROOT SINK
 |
 04:TOP-N [LIMIT=100]
 |  order by: b.string_col ASC
+|  row-size=21B cardinality=0
 |
 07:AGGREGATE [FINALIZE]
 |  output: count:merge(a.bigint_col)
 |  group by: b.string_col
 |  having: count(a.bigint_col) > 1
+|  row-size=21B cardinality=0
 |
 06:EXCHANGE [HASH(b.string_col)]
 |
 03:AGGREGATE [STREAMING]
 |  output: count(a.bigint_col)
 |  group by: b.string_col
+|  row-size=21B cardinality=1
 |
 02:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: a.id = b.id
 |  runtime filters: RF000 <- b.id
+|  row-size=29B cardinality=1
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny b]
 |     partitions=4/4 files=4 size=460B
+|     row-size=17B cardinality=8
 |
 00:SCAN HDFS [functional.alltypesagg a]
    partitions=11/11 files=11 size=814.73KB
    predicates: a.bigint_col < 50
    runtime filters: RF000 -> a.id
+   row-size=12B cardinality=1.10K
 ====
 # Basic test with a view on a view
 select int_col, string_col from functional.view_view
@@ -79,6 +92,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -86,6 +100,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=17B cardinality=7.30K
 ====
 # view used in a union.
 select * from functional.alltypes_view union all
@@ -94,26 +109,32 @@ select * from functional.alltypes_view where id < 10
 PLAN-ROOT SINK
 |
 00:UNION
+|  row-size=89B cardinality=8.03K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.id < 10
+|     row-size=89B cardinality=730
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:EXCHANGE [UNPARTITIONED]
 |
 00:UNION
+|  row-size=89B cardinality=8.03K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.id < 10
+|     row-size=89B cardinality=730
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # view used in an inline view.
 select t.id from (select id from functional.alltypes_view) t
@@ -124,6 +145,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id < 10
+   row-size=4B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -132,6 +154,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id < 10
+   row-size=4B cardinality=730
 ====
 # Multiple views used in a join.
 select * from functional.alltypes_view t1, functional.alltypes_view_sub t2,
@@ -142,40 +165,49 @@ PLAN-ROOT SINK
 08:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = count(a.bigint_col)
 |  runtime filters: RF000 <- count(a.bigint_col)
+|  row-size=143B cardinality=730
 |
 |--06:TOP-N [LIMIT=100]
 |  |  order by: b.string_col ASC
+|  |  row-size=21B cardinality=0
 |  |
 |  05:AGGREGATE [FINALIZE]
 |  |  output: count(a.bigint_col)
 |  |  group by: b.string_col
 |  |  having: count(a.bigint_col) > 1
+|  |  row-size=21B cardinality=0
 |  |
 |  04:HASH JOIN [INNER JOIN]
 |  |  hash predicates: a.id = b.id
 |  |  runtime filters: RF004 <- b.id
+|  |  row-size=29B cardinality=1
 |  |
 |  |--03:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=17B cardinality=8
 |  |
 |  02:SCAN HDFS [functional.alltypesagg a]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: a.bigint_col < 50
 |     runtime filters: RF004 -> a.id
+|     row-size=12B cardinality=1.10K
 |
 07:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=122B cardinality=730
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.int_col > 1
 |     runtime filters: RF000 -> int_col
+|     row-size=33B cardinality=730
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id > 1
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -184,6 +216,7 @@ PLAN-ROOT SINK
 08:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: int_col = count(a.bigint_col)
 |  runtime filters: RF000 <- count(a.bigint_col)
+|  row-size=143B cardinality=730
 |
 |--15:EXCHANGE [BROADCAST]
 |  |
@@ -193,35 +226,42 @@ PLAN-ROOT SINK
 |  |
 |  06:TOP-N [LIMIT=100]
 |  |  order by: b.string_col ASC
+|  |  row-size=21B cardinality=0
 |  |
 |  13:AGGREGATE [FINALIZE]
 |  |  output: count:merge(a.bigint_col)
 |  |  group by: b.string_col
 |  |  having: count(a.bigint_col) > 1
+|  |  row-size=21B cardinality=0
 |  |
 |  12:EXCHANGE [HASH(b.string_col)]
 |  |
 |  05:AGGREGATE [STREAMING]
 |  |  output: count(a.bigint_col)
 |  |  group by: b.string_col
+|  |  row-size=21B cardinality=1
 |  |
 |  04:HASH JOIN [INNER JOIN, BROADCAST]
 |  |  hash predicates: a.id = b.id
 |  |  runtime filters: RF004 <- b.id
+|  |  row-size=29B cardinality=1
 |  |
 |  |--11:EXCHANGE [BROADCAST]
 |  |  |
 |  |  03:SCAN HDFS [functional.alltypestiny b]
 |  |     partitions=4/4 files=4 size=460B
+|  |     row-size=17B cardinality=8
 |  |
 |  02:SCAN HDFS [functional.alltypesagg a]
 |     partitions=11/11 files=11 size=814.73KB
 |     predicates: a.bigint_col < 50
 |     runtime filters: RF004 -> a.id
+|     row-size=12B cardinality=1.10K
 |
 07:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=122B cardinality=730
 |
 |--10:EXCHANGE [HASH(int_col)]
 |  |
@@ -229,6 +269,7 @@ PLAN-ROOT SINK
 |     partitions=24/24 files=24 size=478.45KB
 |     predicates: functional.alltypes.int_col > 1
 |     runtime filters: RF000 -> int_col
+|     row-size=33B cardinality=730
 |
 09:EXCHANGE [HASH(functional.alltypes.id)]
 |
@@ -236,6 +277,7 @@ PLAN-ROOT SINK
    partitions=24/24 files=24 size=478.45KB
    predicates: functional.alltypes.id > 1
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=730
 ====
 # Self-join of view to make sure the on clause is properly set
 # in the cloned view instances.
@@ -248,21 +290,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=267B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF002 <- functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.id
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -271,27 +318,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=267B cardinality=7.30K
 |
 |--07:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF002 <- functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--06:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.id
+|     row-size=89B cardinality=7.30K
 |
 05:EXCHANGE [HASH(functional.alltypes.id)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ====
 # Self-join views to make sure the using clause is properly set
 # in the cloned view instances.
@@ -304,21 +356,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=267B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF002 <- functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.id
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -327,27 +384,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=267B cardinality=7.30K
 |
 |--07:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF002 <- functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--06:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.id
+|     row-size=89B cardinality=7.30K
 |
 05:EXCHANGE [HASH(functional.alltypes.id)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ====
 # Self-join of view to make sure the join op is properly set
 # in the cloned view instances.
@@ -359,18 +421,23 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
+|  row-size=267B cardinality=14.60K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -378,24 +445,29 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
+|  row-size=267B cardinality=14.60K
 |
 |--07:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [LEFT OUTER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--06:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 05:EXCHANGE [HASH(functional.alltypes.id)]
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=89B cardinality=7.30K
 ====
 # Self-join of view to make sure join hints are properly set
 # in the cloned view instances.
@@ -410,21 +482,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=267B cardinality=7.30K
 |
 |--02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF002 <- functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.id
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -433,27 +510,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF000 <- functional.alltypes.id
+|  row-size=267B cardinality=7.30K
 |
 |--07:EXCHANGE [HASH(functional.alltypes.id)]
 |  |
 |  02:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
+|     row-size=89B cardinality=7.30K
 |
 06:EXCHANGE [HASH(functional.alltypes.id)]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: functional.alltypes.id = functional.alltypes.id
 |  runtime filters: RF002 <- functional.alltypes.id
+|  row-size=178B cardinality=7.30K
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypes]
 |     partitions=24/24 files=24 size=478.45KB
 |     runtime filters: RF000 -> functional.alltypes.id
+|     row-size=89B cardinality=7.30K
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.id, RF002 -> functional.alltypes.id
+   row-size=89B cardinality=7.30K
 ====
 # Tests that parentheses are preserved when creating a view
 # enabling proper partition pruning for this particular view.
@@ -462,16 +544,20 @@ select * from functional.alltypes_parens
 PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 1
    partitions=1/24 files=1 size=19.95KB
    predicates: (int_col < 100 OR bool_col = FALSE)
+   row-size=89B cardinality=31
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 01:EXCHANGE [UNPARTITIONED]
 |
 00:SCAN HDFS [functional.alltypes]
+   partition predicates: year = 2009, month = 1
    partitions=1/24 files=1 size=19.95KB
    predicates: (int_col < 100 OR bool_col = FALSE)
+   row-size=89B cardinality=31
 ====
 # Tests that slotrefs are correctly marked as assigned inside an inline view where
 # possible (see IMPALA-923)
@@ -482,6 +568,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    predicates: bool_col
+   row-size=1B cardinality=730
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -490,4 +577,5 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypes t]
    partitions=24/24 files=24 size=478.45KB
    predicates: bool_col
+   row-size=1B cardinality=730
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-planner/queries/PlannerTest/with-clause.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/with-clause.test b/testdata/workloads/functional-planner/queries/PlannerTest/with-clause.test
index 9c5f577..a1f045f 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/with-clause.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/with-clause.test
@@ -5,6 +5,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -12,6 +13,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # Basic test with a single with-clause view that references a catalog view.
 with t as (select int_col x, bigint_col y from functional.alltypes_view)
@@ -21,6 +23,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -28,6 +31,7 @@ PLAN-ROOT SINK
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # Multiple views in with-clause. Only one view is used.
 with t1 as (select int_col x, bigint_col y from functional.alltypes),
@@ -38,11 +42,13 @@ PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=1
+   row-size=2B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 00:UNION
    constant-operands=1
+   row-size=2B cardinality=1
 ====
 # Multiple views in with-clause. All views are used in a union.
 with t1 as (select int_col x, bigint_col y from functional.alltypes),
@@ -52,34 +58,42 @@ select * from t1 union all select * from t2 union all select * from t3
 PLAN-ROOT SINK
 |
 00:UNION
+|  row-size=12B cardinality=7.30K
 |
 |--03:UNION
 |     constant-operands=2
+|     row-size=2B cardinality=2
 |
 |--02:UNION
 |     constant-operands=1
+|     row-size=2B cardinality=1
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 06:EXCHANGE [UNPARTITIONED]
 |
 00:UNION
+|  row-size=12B cardinality=7.30K
 |
 |--05:EXCHANGE [RANDOM]
 |  |
 |  03:UNION
 |     constant-operands=2
+|     row-size=2B cardinality=2
 |
 |--04:EXCHANGE [RANDOM]
 |  |
 |  02:UNION
 |     constant-operands=1
+|     row-size=2B cardinality=1
 |
 01:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
+   row-size=12B cardinality=7.30K
 ====
 # Multiple views in with-clause. All views are used in a join.
 with t1 as (select int_col x, bigint_col y from functional.alltypes),
@@ -92,21 +106,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=58.40K
 |
 |--02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=12B cardinality=100
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=5.84K
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.int_col, RF002 -> int_col
+   row-size=12B cardinality=7.30K
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -115,25 +134,30 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=58.40K
 |
 |--06:EXCHANGE [BROADCAST]
 |  |
 |  02:SCAN HDFS [functional.alltypessmall]
 |     partitions=4/4 files=4 size=6.32KB
+|     row-size=12B cardinality=100
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=5.84K
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypes]
    partitions=24/24 files=24 size=478.45KB
    runtime filters: RF000 -> functional.alltypes.int_col, RF002 -> int_col
+   row-size=12B cardinality=7.30K
 ====
 # Multiple dependent views in with-clause
 with t1 as (
@@ -158,37 +182,46 @@ PLAN-ROOT SINK
 |
 05:TOP-N [LIMIT=3]
 |  order by: c3 ASC, c1 DESC
+|  row-size=9B cardinality=1
 |
 04:SELECT
 |  predicates: c1 > 0
+|  row-size=9B cardinality=1
 |
 03:AGGREGATE [FINALIZE]
 |  output: max(tinyint_col)
 |  group by: int_col, max(id)
 |  limit: 10
+|  row-size=9B cardinality=5
 |
 02:TOP-N [LIMIT=5]
 |  order by: int_col ASC, tinyint_col ASC
+|  row-size=9B cardinality=5
 |
 01:AGGREGATE [FINALIZE]
 |  output: max(id)
 |  group by: int_col, tinyint_col
+|  row-size=9B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=9B cardinality=100
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 05:TOP-N [LIMIT=3]
 |  order by: c3 ASC, c1 DESC
+|  row-size=9B cardinality=1
 |
 04:SELECT
 |  predicates: c1 > 0
+|  row-size=9B cardinality=1
 |
 03:AGGREGATE [FINALIZE]
 |  output: max(tinyint_col)
 |  group by: int_col, max(id)
 |  limit: 10
+|  row-size=9B cardinality=5
 |
 08:MERGING-EXCHANGE [UNPARTITIONED]
 |  order by: int_col ASC, tinyint_col ASC
@@ -196,19 +229,23 @@ PLAN-ROOT SINK
 |
 02:TOP-N [LIMIT=5]
 |  order by: int_col ASC, tinyint_col ASC
+|  row-size=9B cardinality=5
 |
 07:AGGREGATE [FINALIZE]
 |  output: max:merge(id)
 |  group by: int_col, tinyint_col
+|  row-size=9B cardinality=100
 |
 06:EXCHANGE [HASH(int_col,tinyint_col)]
 |
 01:AGGREGATE [STREAMING]
 |  output: max(id)
 |  group by: int_col, tinyint_col
+|  row-size=9B cardinality=100
 |
 00:SCAN HDFS [functional.alltypessmall]
    partitions=4/4 files=4 size=6.32KB
+   row-size=9B cardinality=100
 ====
 # Self-join of with-clause table to make sure the on clause is properly set
 # in the cloned inline-view instances.
@@ -220,21 +257,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=128
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=32
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
+   row-size=12B cardinality=8
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -243,27 +285,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=128
 |
 |--07:EXCHANGE [HASH(int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=32
 |
 |--06:EXCHANGE [HASH(int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 05:EXCHANGE [HASH(int_col)]
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
+   row-size=12B cardinality=8
 ====
 # Self-join of with-clause table to make sure the using clause is properly set
 # in the cloned inline-view instances.
@@ -275,21 +322,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=128
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=32
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
+   row-size=12B cardinality=8
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -298,27 +350,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=128
 |
 |--07:EXCHANGE [HASH(int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=32
 |
 |--06:EXCHANGE [HASH(int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 05:EXCHANGE [HASH(int_col)]
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
+   row-size=12B cardinality=8
 ====
 # Self-join of with-clause table to make sure the join op is properly set
 # in the cloned inline-view instances.
@@ -329,18 +386,23 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [FULL OUTER JOIN]
 |  hash predicates: int_col = int_col
+|  row-size=36B cardinality=128
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [LEFT OUTER JOIN]
 |  hash predicates: int_col = int_col
+|  row-size=24B cardinality=32
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=12B cardinality=8
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -348,24 +410,29 @@ PLAN-ROOT SINK
 |
 04:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
+|  row-size=36B cardinality=128
 |
 |--07:EXCHANGE [HASH(int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [LEFT OUTER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
+|  row-size=24B cardinality=32
 |
 |--06:EXCHANGE [HASH(int_col)]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 05:EXCHANGE [HASH(int_col)]
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=12B cardinality=8
 ====
 # Self-join of with-clause table to make sure join hints are properly set
 # in the cloned inline-view instances.
@@ -379,21 +446,26 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=128
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 03:HASH JOIN [INNER JOIN]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=32
 |
 |--01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
+   row-size=12B cardinality=8
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
@@ -402,27 +474,32 @@ PLAN-ROOT SINK
 04:HASH JOIN [INNER JOIN, PARTITIONED]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF000 <- int_col
+|  row-size=36B cardinality=128
 |
 |--07:EXCHANGE [HASH(int_col)]
 |  |
 |  02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=12B cardinality=8
 |
 06:EXCHANGE [HASH(int_col)]
 |
 03:HASH JOIN [INNER JOIN, BROADCAST]
 |  hash predicates: int_col = int_col
 |  runtime filters: RF002 <- int_col
+|  row-size=24B cardinality=32
 |
 |--05:EXCHANGE [BROADCAST]
 |  |
 |  01:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
 |     runtime filters: RF000 -> int_col
+|     row-size=12B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
+   row-size=12B cardinality=8
 ====
 # Multiple with clauses. One for the UnionStmt and one for each union operand.
 with t1 as (values('a', 'b'))
@@ -433,27 +510,35 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=1]
 |  order by: 'c' ASC
+|  row-size=24B cardinality=1
 |
 00:UNION
+|  row-size=24B cardinality=2
 |
 |--02:UNION
 |     constant-operands=1
+|     row-size=24B cardinality=1
 |
 01:UNION
    constant-operands=1
+   row-size=24B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=1]
 |  order by: 'c' ASC
+|  row-size=24B cardinality=1
 |
 00:UNION
+|  row-size=24B cardinality=2
 |
 |--02:UNION
 |     constant-operands=1
+|     row-size=24B cardinality=1
 |
 01:UNION
    constant-operands=1
+   row-size=24B cardinality=1
 ====
 # Multiple with clauses. One for the UnionStmt and one for each union operand.
 with t1 as (values('a', 'b'))
@@ -464,27 +549,35 @@ PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=1]
 |  order by: 'c' ASC
+|  row-size=24B cardinality=1
 |
 00:UNION
+|  row-size=24B cardinality=2
 |
 |--02:UNION
 |     constant-operands=1
+|     row-size=24B cardinality=1
 |
 01:UNION
    constant-operands=1
+   row-size=24B cardinality=1
 ---- DISTRIBUTEDPLAN
 PLAN-ROOT SINK
 |
 03:TOP-N [LIMIT=1]
 |  order by: 'c' ASC
+|  row-size=24B cardinality=1
 |
 00:UNION
+|  row-size=24B cardinality=2
 |
 |--02:UNION
 |     constant-operands=1
+|     row-size=24B cardinality=1
 |
 01:UNION
    constant-operands=1
+   row-size=24B cardinality=1
 ====
 # Test with clause in an insert statement.
 with t1 as (select * from functional.alltypestiny)
@@ -495,20 +588,24 @@ WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,
 |
 01:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=8
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=4
 |
 02:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=8
 |
 01:EXCHANGE [HASH(functional.alltypestiny.year,functional.alltypestiny.month)]
 |
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # Test with clause in an insert statement and in its query statement.
 with t1 as (select * from functional.alltypestiny)
@@ -521,30 +618,38 @@ WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,
 |
 03:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=16
 |
 00:UNION
+|  row-size=89B cardinality=16
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=16
 |
 04:SORT
 |  order by: year ASC NULLS LAST, month ASC NULLS LAST
+|  row-size=89B cardinality=16
 |
 03:EXCHANGE [HASH(year,month)]
 |
 00:UNION
+|  row-size=89B cardinality=16
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # IMPALA-5293: Test with clause in an insert statement and in its query statement. Make
 # sure that noclustered hint prevents addition of a sort node before writing to HDFS.
@@ -557,12 +662,15 @@ WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,
 |  partitions=16
 |
 00:UNION
+|  row-size=89B cardinality=16
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ---- DISTRIBUTEDPLAN
 WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,month)]
 |  partitions=16
@@ -570,12 +678,15 @@ WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,
 03:EXCHANGE [HASH(year,month)]
 |
 00:UNION
+|  row-size=89B cardinality=16
 |
 |--02:SCAN HDFS [functional.alltypestiny]
 |     partitions=4/4 files=4 size=460B
+|     row-size=89B cardinality=8
 |
 01:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
+   row-size=89B cardinality=8
 ====
 # Test with clause with a query statement that references the same column from a
 # base table multiple times (IMPALA-1412)
@@ -589,6 +700,7 @@ PLAN-ROOT SINK
 00:SCAN HDFS [functional.alltypestiny]
    partitions=4/4 files=4 size=460B
    predicates: bigint_col = bigint_col
+   row-size=9B cardinality=1
 ====
 # IMPALA-2414: Test basic correlated WITH clause view.
 select pos from functional.allcomplextypes t inner join
@@ -599,16 +711,21 @@ on v.pos = t.id
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=24B cardinality=0
 |
 |--04:NESTED LOOP JOIN [INNER JOIN]
 |  |  join predicates: pos = t.id
+|  |  row-size=24B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=16B cardinality=1
 |  |
 |  03:UNNEST [t.int_array_col]
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes t]
    partitions=0/0 files=0 size=0B
+   row-size=16B cardinality=0
 ====
 # IMPALA-2414: Test correlated WITH clause view nested in another WITH clause.
 select pos from functional.allcomplextypes t inner join
@@ -618,15 +735,20 @@ select pos from functional.allcomplextypes t inner join
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  03:UNNEST [t.int_array_col]
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes t]
    partitions=0/0 files=0 size=0B
+   row-size=12B cardinality=0
 ====
 # IMPALA-2414: Test correlated WITH clause view nested in another WITH clause.
 with w1 as (select pos from functional.allcomplextypes t,
@@ -636,15 +758,20 @@ select * from w1
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=20B cardinality=0
 |
 |--04:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=20B cardinality=10
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=12B cardinality=1
 |  |
 |  03:UNNEST [t.int_array_col]
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes t]
    partitions=0/0 files=0 size=0B
+   row-size=12B cardinality=0
 ====
 # IMPALA-2414: Test multiple correlated WITH clause views that are joined.
 select pos from functional.allcomplextypes t inner join
@@ -655,26 +782,37 @@ select pos from functional.allcomplextypes t inner join
 PLAN-ROOT SINK
 |
 01:SUBPLAN
+|  row-size=64B cardinality=0
 |
 |--10:NESTED LOOP JOIN [INNER JOIN]
 |  |  join predicates: value = t.id
+|  |  row-size=64B cardinality=10.00K
 |  |
 |  |--02:SINGULAR ROW SRC
+|  |     row-size=52B cardinality=1
 |  |
 |  09:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=12B cardinality=10.00K
 |  |
 |  |--06:UNNEST [t.map_map_col.value]
+|  |     row-size=4B cardinality=10
 |  |
 |  08:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=8B cardinality=1.00K
 |  |
 |  |--05:UNNEST [t.map_map_col.value]
+|  |     row-size=0B cardinality=10
 |  |
 |  07:NESTED LOOP JOIN [CROSS JOIN]
+|  |  row-size=8B cardinality=100
 |  |
 |  |--04:UNNEST [t.int_array_col]
+|  |     row-size=0B cardinality=10
 |  |
 |  03:UNNEST [t.int_array_col]
+|     row-size=8B cardinality=10
 |
 00:SCAN HDFS [functional.allcomplextypes t]
    partitions=0/0 files=0 size=0B
+   row-size=52B cardinality=0
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test b/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test
index da88f3b..74707db 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/corrupt-stats.test
@@ -66,7 +66,7 @@ explain select count(*) from corrupted where org = 1;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted]'
-'   partitions=1/2 files=1 size=24B'
+'   partitions=1/2 files=1 size=24B row-size=0B cardinality=0'
 ---- TYPES
 STRING
 ====
@@ -92,7 +92,7 @@ explain select count(*) from corrupted;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted]'
-'   partitions=2/2 files=2 size=48B'
+'   partitions=2/2 files=2 size=48B row-size=0B cardinality=6'
 ---- TYPES
 STRING
 ====
@@ -117,7 +117,7 @@ explain select count(*) from corrupted;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted]'
-'   partitions=2/2 files=2 size=48B'
+'   partitions=2/2 files=2 size=48B row-size=0B cardinality=6'
 ---- TYPES
 STRING
 ====
@@ -139,7 +139,7 @@ explain select count(*) from corrupted where org = 2;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted]'
-'   partitions=1/2 files=1 size=24B'
+'   partitions=1/2 files=1 size=24B row-size=0B cardinality=6'
 ---- TYPES
 STRING
 ====
@@ -165,7 +165,7 @@ explain select count(*) from corrupted_no_part;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted_no_part]'
-'   partitions=1/1 files=1 size=6B'
+'   partitions=1/1 files=1 size=6B row-size=0B cardinality=3'
 ---- TYPES
 STRING
 ====
@@ -197,7 +197,7 @@ explain select count(*) from corrupted_no_part;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted_no_part]'
-'   partitions=1/1 files=1 size=6B'
+'   partitions=1/1 files=1 size=6B row-size=0B cardinality=0'
 ---- TYPES
 STRING
 ====
@@ -219,7 +219,7 @@ explain select count(*) from corrupted_no_part;
 '|  output: count(*)'
 '|'
 '00:SCAN HDFS [$DATABASE.corrupted_no_part]'
-'   partitions=1/1 files=1 size=6B'
+'   partitions=1/1 files=1 size=6B row-size=0B cardinality=unavailable'
 ---- TYPES
 STRING
 ====

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test b/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test
index 13f7148..9a6dea3 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/explain-level1.test
@@ -15,15 +15,18 @@ row_regex:.*Per-Host Resource Estimates: Memory=[0-9.]*MB.*
 '02:HASH JOIN [INNER JOIN, BROADCAST]'
 '|  hash predicates: l_orderkey = o_orderkey'
 '|  runtime filters: RF000 <- o_orderkey'
+row_regex:.*row-size=.* cardinality=.*
 '|'
 '|--03:EXCHANGE [BROADCAST]'
 '|  |'
 '|  01:SCAN HDFS [tpch.orders]'
 row_regex:.*partitions=1/1 files=1 size=.*
+row_regex:.*row-size=.* cardinality=.*
 '|'
 '00:SCAN HDFS [tpch.lineitem]'
 row_regex:.*partitions=1/1 files=1 size=.*
 '   runtime filters: RF000 -> l_orderkey'
+row_regex:.*row-size=.* cardinality=.*
 ====
 ---- QUERY
 # Tests the warning about missing table stats in the explain header.

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test b/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
index 7d65337..6c07f77 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/explain-level2.test
@@ -11,13 +11,13 @@ row_regex:.*Per-Host Resource Estimates: Memory=[0-9.]*MB.*
 '= o_orderkey'
 ''
 'F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
-row_regex:.*|  Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1
+row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1
 'PLAN-ROOT SINK'
-row_regex:.*|  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
 '|'
 '04:EXCHANGE [UNPARTITIONED]'
-row_regex:.*|  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
-'|  tuple-ids=0,1 row-size=402B cardinality=5757710'
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+'|  tuple-ids=0,1 row-size=402B cardinality=5.76M'
 '|  in pipelines: 00(GETNEXT)'
 '|'
 'F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3'
@@ -26,25 +26,25 @@ row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9
 '|  hash predicates: l_orderkey = o_orderkey'
 '|  fk/pk conjuncts: l_orderkey = o_orderkey'
 '|  runtime filters: RF000[bloom] <- o_orderkey'
-row_regex:.*|  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B spill-buffer=[0-9.]*MB thread-reservation=0.*
-'|  tuple-ids=0,1 row-size=402B cardinality=5757710'
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B spill-buffer=[0-9.]*MB thread-reservation=0.*
+'|  tuple-ids=0,1 row-size=402B cardinality=5.76M'
 '|  in pipelines: 00(GETNEXT), 01(OPEN)'
 '|'
 '|--03:EXCHANGE [BROADCAST]'
-row_regex:.*|  |  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
-'|  |  tuple-ids=1 row-size=171B cardinality=1500000'
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+'|  |  tuple-ids=1 row-size=171B cardinality=1.50M'
 '|  |  in pipelines: 01(GETNEXT)'
 '|  |'
 '|  F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2'
-row_regex:.*|  Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=.*
+row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=.*
 '|  01:SCAN HDFS [tpch.orders, RANDOM]'
 row_regex:.*partitions=1/1 files=1 size=.*
 '|     stored statistics:'
 row_regex:.*table: rows=1500000 size=.*
 '|       columns: all'
-row_regex:.*|     extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
-row_regex:.*|     mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
-'|     tuple-ids=1 row-size=171B cardinality=1500000'
+row_regex:.*extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
+'|     tuple-ids=1 row-size=171B cardinality=1.50M'
 '|     in pipelines: 01(GETNEXT)'
 '|'
 '00:SCAN HDFS [tpch.lineitem, RANDOM]'
@@ -53,9 +53,9 @@ row_regex:.*partitions=1/1 files=1 size=.*
 '   stored statistics:'
 row_regex:.*table: rows=6001215 size=.*
 '     columns: all'
-row_regex:.*   extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
-row_regex:.*   mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
-'   tuple-ids=0 row-size=231B cardinality=6001215'
+row_regex:.*extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
+'   tuple-ids=0 row-size=231B cardinality=6.00M'
 '   in pipelines: 00(GETNEXT)'
 ====
 ---- QUERY

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test b/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test
index 23e97f0..d095028 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/explain-level3.test
@@ -13,28 +13,28 @@ row_regex:.*Per-Host Resource Estimates: Memory=[0-9.]*MB.*
 'F02:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1'
 row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1
 '  PLAN-ROOT SINK'
-row_regex:.*  |  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
 '  |'
 '  04:EXCHANGE [UNPARTITIONED]'
-row_regex:.*     mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
-'     tuple-ids=0,1 row-size=402B cardinality=5757710'
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+'     tuple-ids=0,1 row-size=402B cardinality=5.76M'
 '     in pipelines: 00(GETNEXT)'
 ''
 'F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3'
 row_regex:.*Per-Host Resources: mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=.*
 '  DATASTREAM SINK [FRAGMENT=F02, EXCHANGE=04, UNPARTITIONED]'
-row_regex:.*  |  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
 '  02:HASH JOIN [INNER JOIN, BROADCAST]'
 '  |  hash predicates: l_orderkey = o_orderkey'
 '  |  fk/pk conjuncts: l_orderkey = o_orderkey'
 '  |  runtime filters: RF000[bloom] <- o_orderkey'
-row_regex:.*  |  mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B spill-buffer=[0-9.]*MB thread-reservation=.*
-'  |  tuple-ids=0,1 row-size=402B cardinality=5757710'
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B spill-buffer=[0-9.]*MB thread-reservation=.*
+'  |  tuple-ids=0,1 row-size=402B cardinality=5.76M'
 '  |  in pipelines: 00(GETNEXT), 01(OPEN)'
 '  |'
 '  |--03:EXCHANGE [BROADCAST]'
-row_regex:.*  |     mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
-'  |     tuple-ids=1 row-size=171B cardinality=1500000'
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=0
+'  |     tuple-ids=1 row-size=171B cardinality=1.50M'
 '  |     in pipelines: 01(GETNEXT)'
 '  |'
 '  00:SCAN HDFS [tpch.lineitem, RANDOM]'
@@ -43,9 +43,9 @@ row_regex:.*partitions=1/1 files=1 size=.*
 '     stored statistics:'
 row_regex:.*table: rows=6001215 size=.*
 '       columns: all'
-row_regex:.*|     extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
-row_regex:.*     mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
-'     tuple-ids=0 row-size=231B cardinality=6001215'
+row_regex:.*extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
+row_regex:.*mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=1.*
+'     tuple-ids=0 row-size=231B cardinality=6.00M'
 '     in pipelines: 00(GETNEXT)'
 ''
 'F01:PLAN FRAGMENT [RANDOM] hosts=2 instances=2'
@@ -59,7 +59,7 @@ row_regex:.*table: rows=1500000 size=.*
 '       columns: all'
 row_regex:.*   extrapolated-rows=disabled max-scan-range-rows=[0-9]*.*
 row_regex:.*     mem-estimate=[0-9.]*[A-Z]*B mem-reservation=[0-9.]*[A-Z]*B thread-reservation=.*
-'     tuple-ids=1 row-size=171B cardinality=1500000'
+'     tuple-ids=1 row-size=171B cardinality=1.50M'
 '     in pipelines: 01(GETNEXT)'
 ====
 ---- QUERY

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test b/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
index 7a4999c..0c6deb4 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
@@ -70,7 +70,7 @@ row_regex:.*table: rows=3650 size=.*
 '     columns: all'
 row_regex:.* extrapolated-rows=3650.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
-'   tuple-ids=0 row-size=4B cardinality=3650'
+'   tuple-ids=0 row-size=4B cardinality=3.65K'
 '   in pipelines: 00(GETNEXT)'
 ---- TYPES
 STRING
@@ -91,6 +91,7 @@ row_regex:.*mem-estimate=.* mem-reservation=.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
 '|'
 '00:SCAN HDFS [$DATABASE.alltypes]'
+'   partition predicates: month IN (CAST(1 AS INT), CAST(2 AS INT), CAST(3 AS INT))'
 row_regex:.*partitions=3/12 files=3 size=.*
 '   stored statistics:'
 row_regex:.*table: rows=3650 size=.*
@@ -126,7 +127,7 @@ row_regex:.*table: rows=3650 size=.*
 '     columns: all'
 row_regex:.* extrapolated-rows=7300.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
-'   tuple-ids=0 row-size=4B cardinality=7300'
+'   tuple-ids=0 row-size=4B cardinality=7.30K'
 '   in pipelines: 00(GETNEXT)'
 ---- TYPES
 STRING
@@ -149,6 +150,7 @@ row_regex:.*Per-Host Resources: mem-estimate=.* mem-reservation=.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
 '|'
 '00:SCAN HDFS [$DATABASE.alltypes]'
+'   partition predicates: year = CAST(2010 AS INT)'
 row_regex:.*partitions=12/24 files=12 size=.*
 '   stored statistics:'
 row_regex:.*table: rows=3650 size=.*
@@ -156,7 +158,7 @@ row_regex:.*table: rows=3650 size=.*
 '     columns: all'
 row_regex:.* extrapolated-rows=3651.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
-'   tuple-ids=0 row-size=4B cardinality=3651'
+'   tuple-ids=0 row-size=4B cardinality=3.65K'
 '   in pipelines: 00(GETNEXT)'
 ---- TYPES
 STRING
@@ -178,6 +180,7 @@ row_regex:.*Per-Host Resources: mem-estimate=.* mem-reservation=.*
 row_regex:.*mem-estimate=.* mem-reservation=.*
 '|'
 '00:SCAN HDFS [$DATABASE.alltypes]'
+'   partition predicates: year = CAST(2010 AS INT)'
 row_regex:.*partitions=12/24 files=12 size=.*
 '   stored statistics:'
 row_regex:.*table: rows=10950 size=.*
@@ -185,7 +188,7 @@ row_regex:.*table: rows=10950 size=.*
 '     columns: all'
 row_regex:.* extrapolated-rows=3651
 row_regex:.*mem-estimate=.* mem-reservation=.*
-'   tuple-ids=0 row-size=4B cardinality=3651'
+'   tuple-ids=0 row-size=4B cardinality=3.65K'
 '   in pipelines: 00(GETNEXT)'
 ---- TYPES
 STRING

http://git-wip-us.apache.org/repos/asf/impala/blob/a7ea86b7/tests/metadata/test_explain.py
----------------------------------------------------------------------
diff --git a/tests/metadata/test_explain.py b/tests/metadata/test_explain.py
index ba206f2..48a6d69 100644
--- a/tests/metadata/test_explain.py
+++ b/tests/metadata/test_explain.py
@@ -78,7 +78,7 @@ class TestExplain(ImpalaTestSuite):
     tbl_name = 'alltypes'
 
     def check_cardinality(query_result, expected_cardinality):
-      regex = re.compile('tuple-ids=\d+ row-size=\d+B cardinality=(\d+)')
+      regex = re.compile(' row-size=\d+B cardinality=(.*)$')
       for res in query_result:
         m = regex.match(res.strip())
         if m:
@@ -94,12 +94,12 @@ class TestExplain(ImpalaTestSuite):
     # Half of the partitions are filtered out, cardinality should be 3650.
     result = self.execute_query("explain select * from %s.%s where year = 2010" % (
         db_name, tbl_name), query_options={'explain_level':3})
-    check_cardinality(result.data, '3650')
+    check_cardinality(result.data, '3.65K')
 
     # None of the partitions are filtered out, cardinality should be 7300.
     result = self.execute_query("explain select * from %s.%s" % (db_name, tbl_name),
         query_options={'explain_level':3})
-    check_cardinality(result.data, '7300')
+    check_cardinality(result.data, '7.30K')
 
     # Create a partitioned table with a mixed set of available stats,
     mixed_tbl = unique_database + ".t"
