Repository: hive
Updated Branches:
  refs/heads/master 112cbd19c -> 24f1861e8
http://git-wip-us.apache.org/repos/asf/hive/blob/24f1861e/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out index c534cb5..6a9a76c 100644 --- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out @@ -3597,7 +3597,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=1) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=2) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE @@ -3657,7 +3657,7 @@ STAGE PLANS: vectorized: true Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=1) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=2) mode: final outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE http://git-wip-us.apache.org/repos/asf/hive/blob/24f1861e/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out index e62d913..10fc0f3 100644 --- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out @@ -132,7 +132,7 @@ STAGE PLANS: projectedOutputColumns: [1] Statistics: Num rows: 57 Data size: 10146 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=57) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinLong(col 1) -> int, VectorUDAFMaxLong(col 1) -> int, VectorUDAFBloomFilter(col 1) -> binary className: VectorGroupByOperator @@ -225,7 +225,7 @@ STAGE PLANS: vectorized: true Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=57) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinLong(col 0) -> int, VectorUDAFMaxLong(col 1) -> int, VectorUDAFBloomFilterMerge(col 2) -> binary className: VectorGroupByOperator @@ -369,7 +369,7 @@ STAGE PLANS: projectedOutputColumns: [0] Statistics: Num rows: 57 Data size: 10146 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=57) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinString(col 0) -> string, VectorUDAFMaxString(col 0) -> string, VectorUDAFBloomFilter(col 0) -> binary className: VectorGroupByOperator @@ -462,7 
+462,7 @@ STAGE PLANS: vectorized: true Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=57) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinString(col 0) -> string, VectorUDAFMaxString(col 1) -> string, VectorUDAFBloomFilterMerge(col 2) -> binary className: VectorGroupByOperator @@ -606,7 +606,7 @@ STAGE PLANS: projectedOutputColumns: [0] Statistics: Num rows: 57 Data size: 10146 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=57) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinString(col 0) -> string, VectorUDAFMaxString(col 0) -> string, VectorUDAFBloomFilter(col 0) -> binary className: VectorGroupByOperator @@ -699,7 +699,7 @@ STAGE PLANS: vectorized: true Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=57) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinString(col 0) -> string, VectorUDAFMaxString(col 1) -> string, VectorUDAFBloomFilterMerge(col 2) -> binary className: VectorGroupByOperator @@ -844,7 +844,7 @@ STAGE PLANS: projectedOutputColumns: [1] Statistics: Num rows: 57 Data size: 10146 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=57) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinLong(col 1) -> int, VectorUDAFMaxLong(col 1) -> int, VectorUDAFBloomFilter(col 1) -> binary className: VectorGroupByOperator @@ -914,7 +914,7 @@ STAGE PLANS: projectedOutputColumns: [1] Statistics: Num rows: 57 Data size: 10146 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=57) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinLong(col 1) -> int, VectorUDAFMaxLong(col 1) -> int, VectorUDAFBloomFilter(col 1) -> binary className: VectorGroupByOperator @@ -1009,7 +1009,7 @@ STAGE PLANS: vectorized: true Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=57) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinLong(col 0) -> int, VectorUDAFMaxLong(col 1) -> int, VectorUDAFBloomFilterMerge(col 2) -> binary className: VectorGroupByOperator @@ -1038,7 +1038,7 @@ STAGE PLANS: vectorized: true Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=57) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinLong(col 0) -> int, VectorUDAFMaxLong(col 1) -> int, VectorUDAFBloomFilterMerge(col 2) -> binary className: VectorGroupByOperator @@ -1183,7 +1183,7 @@ STAGE PLANS: projectedOutputColumns: [0] Statistics: Num rows: 57 Data size: 10146 Basic stats: COMPLETE Column stats: NONE Group By Operator - 
aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=57) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinString(col 0) -> string, VectorUDAFMaxString(col 0) -> string, VectorUDAFBloomFilter(col 0) -> binary className: VectorGroupByOperator @@ -1210,7 +1210,7 @@ STAGE PLANS: projectedOutputColumns: [1] Statistics: Num rows: 57 Data size: 10146 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=57) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinLong(col 1) -> int, VectorUDAFMaxLong(col 1) -> int, VectorUDAFBloomFilter(col 1) -> binary className: VectorGroupByOperator @@ -1303,7 +1303,7 @@ STAGE PLANS: vectorized: true Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=57) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinString(col 0) -> string, VectorUDAFMaxString(col 1) -> string, VectorUDAFBloomFilterMerge(col 2) -> binary className: VectorGroupByOperator @@ -1332,7 +1332,7 @@ STAGE PLANS: vectorized: true Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=57) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=114) Group By Vectorization: aggregators: VectorUDAFMinLong(col 0) -> int, VectorUDAFMaxLong(col 1) -> int, VectorUDAFBloomFilterMerge(col 2) -> binary className: VectorGroupByOperator @@ -1476,7 +1476,7 @@ STAGE PLANS: projectedOutputColumns: [1] Statistics: Num rows: 29 Data size: 5162 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=29) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=58) Group By Vectorization: aggregators: VectorUDAFMinLong(col 1) -> int, VectorUDAFMaxLong(col 1) -> int, VectorUDAFBloomFilter(col 1) -> binary className: VectorGroupByOperator @@ -1569,7 +1569,7 @@ STAGE PLANS: vectorized: true Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=29) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=58) Group By Vectorization: aggregators: VectorUDAFMinLong(col 0) -> int, VectorUDAFMaxLong(col 1) -> int, VectorUDAFBloomFilterMerge(col 2) -> binary className: VectorGroupByOperator @@ -1685,7 +1685,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 57 Data size: 228 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=47) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=94) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE @@ -1733,7 +1733,7 @@ STAGE PLANS: Execution mode: vectorized, llap Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=47) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=94) mode: final outputColumnNames: 
_col0, _col1, _col2 Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE http://git-wip-us.apache.org/repos/asf/hive/blob/24f1861e/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction2.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction2.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction2.q.out index d9fd706..a03466f 100644 --- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction2.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction2.q.out @@ -152,7 +152,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=18) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=36) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE @@ -200,7 +200,7 @@ STAGE PLANS: Execution mode: vectorized, llap Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=18) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=36) mode: final outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE @@ -288,7 +288,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 2240 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=16) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=32) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: COMPLETE @@ -336,7 +336,7 @@ STAGE PLANS: Execution mode: vectorized, llap Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=16) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=32) mode: final outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: COMPLETE @@ -424,7 +424,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=30) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=60) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE @@ -472,7 +472,7 @@ STAGE PLANS: Execution mode: vectorized, llap Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=30) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=60) mode: final outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE @@ -560,7 +560,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 1120 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - 
aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=20) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=40) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE @@ -608,7 +608,7 @@ STAGE PLANS: Execution mode: vectorized, llap Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=20) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=40) mode: final outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE @@ -696,7 +696,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=20) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=40) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE @@ -744,7 +744,7 @@ STAGE PLANS: Execution mode: vectorized, llap Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=20) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=40) mode: final outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE @@ -832,7 +832,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=15) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=30) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE @@ -880,7 +880,7 @@ STAGE PLANS: Execution mode: vectorized, llap Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=15) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=30) mode: final outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE @@ -968,7 +968,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=15) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=30) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 282 Basic stats: COMPLETE Column stats: COMPLETE @@ -1016,7 +1016,7 @@ STAGE PLANS: Execution mode: vectorized, llap Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=15) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=30) mode: final outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 282 Basic stats: COMPLETE Column stats: COMPLETE @@ -1104,7 +1104,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 20 Data size: 1880 Basic stats: COMPLETE Column stats: COMPLETE 
Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=15) + aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=30) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 282 Basic stats: COMPLETE Column stats: COMPLETE @@ -1152,7 +1152,7 @@ STAGE PLANS: Execution mode: vectorized, llap Reduce Operator Tree: Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=15) + aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=30) mode: final outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 282 Basic stats: COMPLETE Column stats: COMPLETE http://git-wip-us.apache.org/repos/asf/hive/blob/24f1861e/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out index 32609eb..087f916 100644 --- a/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out +++ b/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out @@ -824,101 +824,34 @@ POSTHOOK: query: explain analyze select a.key, a.value, b.value from tab a join tab_part b on a.key = b.key POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 3 <- Map 1 (CUSTOM_EDGE), Reducer 2 (BROADCAST_EDGE) - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - filterExpr: key is not null (type: boolean) - Statistics: Num rows: 242/242 Data size: 4502 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242/242 Data size: 4502 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242/242 Data size: 4502 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242/242 Data size: 4502 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) - Select Operator - expressions: _col0 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242/242 Data size: 4502 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=242) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1/3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1/3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary) - Map 3 - Map Operator Tree: - TableScan - alias: b - filterExpr: (key is not null and (key BETWEEN DynamicValue(RS_6_a_key_min) AND DynamicValue(RS_6_a_key_max) and in_bloom_filter(key, DynamicValue(RS_6_a_key_bloom_filter)))) (type: boolean) - Statistics: Num rows: 500/500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (key is not null and (key BETWEEN DynamicValue(RS_6_a_key_min) AND 
DynamicValue(RS_6_a_key_max) and in_bloom_filter(key, DynamicValue(RS_6_a_key_bloom_filter)))) (type: boolean) - Statistics: Num rows: 500/244 Data size: 9312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500/244 Data size: 9312 Basic stats: COMPLETE Column stats: NONE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - input vertices: - 0 Map 1 - Statistics: Num rows: 550/480 Data size: 10243 Basic stats: COMPLETE Column stats: NONE - HybridGraceHashJoin: true - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 550/480 Data size: 10243 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 550/480 Data size: 10243 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Reducer 2 - Reduce Operator Tree: - Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=242) - mode: final - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1/1 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1/1 Data size: 12 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary) - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink +Plan optimized by CBO. 
+ +Vertex dependency in root stage +Map 2 <- Map 1 (CUSTOM_EDGE) + +Stage-0 + Fetch Operator + limit:-1 + Stage-1 + Map 2 + File Output Operator [FS_10] + Select Operator [SEL_9] (rows=550/480 width=18) + Output:["_col0","_col1","_col2"] + Map Join Operator [MAPJOIN_25] (rows=550/480 width=18) + BucketMapJoin:true,Conds:RS_6._col0=SEL_5._col0(Inner),HybridGraceHashJoin:true,Output:["_col0","_col1","_col3"] + <-Map 1 [CUSTOM_EDGE] + MULTICAST [RS_6] + PartitionCols:_col0 + Select Operator [SEL_2] (rows=242/242 width=18) + Output:["_col0","_col1"] + Filter Operator [FIL_13] (rows=242/242 width=18) + predicate:key is not null + TableScan [TS_0] (rows=242/242 width=18) + default@tab,a,Tbl:COMPLETE,Col:NONE,Output:["key","value"] + <-Select Operator [SEL_5] (rows=500/500 width=18) + Output:["_col0","_col1"] + Filter Operator [FIL_14] (rows=500/500 width=18) + predicate:key is not null + TableScan [TS_3] (rows=500/500 width=18) + default@tab_part,b,Tbl:COMPLETE,Col:NONE,Output:["key","value"] http://git-wip-us.apache.org/repos/asf/hive/blob/24f1861e/ql/src/test/results/clientpositive/tez/explainuser_3.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out index b3359d3..adcff44 100644 --- a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out +++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out @@ -652,104 +652,34 @@ POSTHOOK: query: explain select a.key, a.value, b.value from tab a join tab_part b on a.key = b.key POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 +Plan optimized by CBO. -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Map 3 <- Map 1 (CUSTOM_EDGE), Reducer 2 (BROADCAST_EDGE) - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - filterExpr: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 4502 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 242 Data size: 4502 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 242 Data size: 4502 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int) - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 242 Data size: 4502 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) - Select Operator - expressions: _col0 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 242 Data size: 4502 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=242) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary) - Execution mode: vectorized - Map 3 - Map Operator Tree: - TableScan - alias: b - filterExpr: (key is not null and (key BETWEEN DynamicValue(RS_6_a_key_min) AND DynamicValue(RS_6_a_key_max) and 
in_bloom_filter(key, DynamicValue(RS_6_a_key_bloom_filter)))) (type: boolean) - Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (key is not null and (key BETWEEN DynamicValue(RS_6_a_key_min) AND DynamicValue(RS_6_a_key_max) and in_bloom_filter(key, DynamicValue(RS_6_a_key_bloom_filter)))) (type: boolean) - Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE - Map Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3 - input vertices: - 0 Map 1 - Statistics: Num rows: 550 Data size: 10243 Basic stats: COMPLETE Column stats: NONE - HybridGraceHashJoin: true - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 550 Data size: 10243 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 550 Data size: 10243 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Execution mode: vectorized - Reducer 2 - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=242) - mode: final - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary) - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink +Vertex dependency in root stage +Map 2 <- Map 1 (CUSTOM_EDGE) + +Stage-0 + Fetch Operator + limit:-1 + Stage-1 + Map 2 vectorized + File Output Operator [FS_34] + Select Operator [SEL_33] (rows=550 width=18) + Output:["_col0","_col1","_col2"] + Map Join Operator [MAPJOIN_32] (rows=550 width=18) + BucketMapJoin:true,Conds:RS_29._col0=SEL_31._col0(Inner),HybridGraceHashJoin:true,Output:["_col0","_col1","_col3"] + <-Map 1 [CUSTOM_EDGE] vectorized + MULTICAST [RS_29] + PartitionCols:_col0 + Select Operator [SEL_28] (rows=242 width=18) + Output:["_col0","_col1"] + Filter Operator [FIL_27] (rows=242 width=18) + predicate:key is not null + TableScan [TS_0] (rows=242 width=18) + default@tab,a,Tbl:COMPLETE,Col:NONE,Output:["key","value"] + <-Select Operator [SEL_31] (rows=500 width=18) + Output:["_col0","_col1"] + Filter Operator [FIL_30] (rows=500 width=18) + predicate:key is not null + TableScan [TS_3] (rows=500 width=18) + default@tab_part,b,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
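The plans above size each semijoin-reduction bloom_filter aggregation from an expectedEntries estimate (for example 57 or 114 in the hunks above). As a rough, Hive-independent illustration of what that estimate implies, the sketch below applies the textbook Bloom filter sizing formulas (bits m = -n*ln(p)/(ln 2)^2, hashes k = (m/n)*ln 2); the class, method names, and the 5% false-positive target are assumptions for this example only and are not Hive APIs or Hive's actual implementation.

    // Illustration only: standard Bloom filter sizing math, not Hive's code.
    public class BloomSizingSketch {

        // m = -n * ln(p) / (ln 2)^2 : bits needed for n expected entries at false-positive rate p
        static long optimalBits(long expectedEntries, double fpp) {
            return (long) Math.ceil(-expectedEntries * Math.log(fpp) / (Math.log(2) * Math.log(2)));
        }

        // k = (m / n) * ln 2 : hash-function count that minimizes the false-positive rate
        static int optimalHashes(long expectedEntries, long bits) {
            return Math.max(1, (int) Math.round((double) bits / expectedEntries * Math.log(2)));
        }

        public static void main(String[] args) {
            double fpp = 0.05; // assumed target false-positive rate, for illustration
            for (long n : new long[] {57, 114}) { // expectedEntries values taken from the plans above
                long m = optimalBits(n, fpp);
                System.out.printf("expectedEntries=%d -> %d bits, %d hashes%n", n, m, optimalHashes(n, m));
            }
        }
    }

Under these assumptions, doubling expectedEntries roughly doubles the filter's bit-array size while keeping the target false-positive rate, which is why the estimate appears explicitly in the aggregation signature of the plans.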