http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join22.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join22.q.out b/ql/src/test/results/beelinepositive/auto_join22.q.out
deleted file mode 100644
index d0a375a..0000000
--- a/ql/src/test/results/beelinepositive/auto_join22.q.out
+++ /dev/null
@@ -1,419 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join22.q.raw". Enter "record" with no arguments to stop it.
->>> !run !!{qFileDirectory}!!/auto_join22.q
->>> set hive.auto.convert.join = true;
-No rows affected
->>> explain
-SELECT sum(hash(src5.src1_value)) FROM (SELECT src3.*, src4.value as src4_value, src4.key as src4_key FROM src src4 JOIN (SELECT src2.*, src1.key as src1_key, src1.value as src1_value FROM src src1 JOIN src src2 ON src1.key = src2.key) src3 ON src3.src1_key = src4.key) src5;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-' (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src4) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src2))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) src1_key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) src1_value)))) src3) (= (. (TOK_TABLE_OR_COL src3) src1_key) (. (TOK_TABLE_OR_COL src4) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src3))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src4) value) src4_value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src4) key) src4_key)))) src5)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL src5) src1_value))) ))))'
-''
-'STAGE DEPENDENCIES:'
-' Stage-11 is a root stage , consists of Stage-14, Stage-15, Stage-1'
-' Stage-14 has a backup stage: Stage-1'
-' Stage-9 depends on stages: Stage-14'
-' Stage-8 depends on stages: Stage-1, Stage-9, Stage-10 , consists of Stage-12, Stage-13, Stage-2'
-' Stage-12 has a backup stage: Stage-2'
-' Stage-6 depends on stages: Stage-12'
-' Stage-3 depends on stages: Stage-2, Stage-6, Stage-7'
-' Stage-13 has a backup stage: Stage-2'
-' Stage-7 depends on stages: Stage-13'
-' Stage-2'
-' Stage-15 has a backup stage: Stage-1'
-' Stage-10 depends on stages: Stage-15'
-' Stage-1'
-' Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-' Stage: Stage-11'
-' Conditional Operator'
-''
-' Stage: Stage-14'
-' Map Reduce Local Work'
-' Alias -> Map Local Tables:'
-' src5:src3:src2 '
-' Fetch Operator'
-' limit: -1'
-' Alias -> Map Local Operator Tree:'
-' src5:src3:src2 '
-' TableScan'
-' alias: src2'
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 {key} {value}'
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[key]]'
-' Position of Big Table: 0'
-''
-' Stage: Stage-9'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' src5:src3:src1 '
-' TableScan'
-' alias: src1'
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 {key} {value}'
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[key]]'
-' outputColumnNames: _col0, _col1'
-' Position of Big Table: 0'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: string'
-' expr: _col1'
-' type: string'
-' outputColumnNames: _col2, _col3'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' Local Work:'
-' Map Reduce Local Work'
-''
-' Stage: Stage-8'
-' Conditional Operator'
-''
-' Stage: Stage-12'
-' Map Reduce Local Work'
-' Alias -> Map Local Tables:'
-' $INTNAME '
-' Fetch Operator'
-' limit: -1'
-' Alias -> Map Local Operator Tree:'
-' $INTNAME '
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 '
-' 1 {_col3}'
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[_col2]]'
-' Position of Big Table: 0'
-''
-' Stage: Stage-6'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' src5:src4 '
-' TableScan'
-' alias: src4'
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 '
-' 1 {_col3}'
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[_col2]]'
-' outputColumnNames: _col7'
-' Position of Big Table: 0'
-' Select Operator'
-' expressions:'
-' expr: _col7'
-' type: string'
-' outputColumnNames: _col3'
-' Select Operator'
-' expressions:'
-' expr: _col3'
-' type: string'
-' outputColumnNames: _col3'
-' Group By Operator'
-' aggregations:'
-' expr: sum(hash(_col3))'
-' bucketGroup: false'
-' mode: hash'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' Local Work:'
-' Map Reduce Local Work'
-''
-' Stage: Stage-3'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' file:!!{hive.exec.scratchdir}!! '
-' Reduce Output Operator'
-' sort order: '
-' tag: -1'
-' value expressions:'
-' expr: _col0'
-' type: bigint'
-' Reduce Operator Tree:'
-' Group By Operator'
-' aggregations:'
-' expr: sum(VALUE._col0)'
-' bucketGroup: false'
-' mode: mergepartial'
-' outputColumnNames: _col0'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: bigint'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.TextInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-' Stage: Stage-13'
-' Map Reduce Local Work'
-' Alias -> Map Local Tables:'
-' src5:src4 '
-' Fetch Operator'
-' limit: -1'
-' Alias -> Map Local Operator Tree:'
-' src5:src4 '
-' TableScan'
-' alias: src4'
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 '
-' 1 {_col3}'
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[_col2]]'
-' Position of Big Table: 1'
-''
-' Stage: Stage-7'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' $INTNAME '
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 '
-' 1 {_col3}'
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[_col2]]'
-' outputColumnNames: _col7'
-' Position of Big Table: 1'
-' Select Operator'
-' expressions:'
-' expr: _col7'
-' type: string'
-' outputColumnNames: _col3'
-' Select Operator'
-' expressions:'
-' expr: _col3'
-' type: string'
-' outputColumnNames: _col3'
-' Group By Operator'
-' aggregations:'
-' expr: sum(hash(_col3))'
-' bucketGroup: false'
-' mode: hash'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' Local Work:'
-' Map Reduce Local Work'
-''
-' Stage: Stage-2'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' $INTNAME '
-' Reduce Output Operator'
-' key expressions:'
-' expr: _col2'
-' type: string'
-' sort order: +'
-' Map-reduce partition columns:'
-' expr: _col2'
-' type: string'
-' tag: 1'
-' value expressions:'
-' expr: _col3'
-' type: string'
-' src5:src4 '
-' TableScan'
-' alias: src4'
-' Reduce Output Operator'
-' key expressions:'
-' expr: key'
-' type: string'
-' sort order: +'
-' Map-reduce partition columns:'
-' expr: key'
-' type: string'
-' tag: 0'
-' Reduce Operator Tree:'
-' Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 '
-' 1 {VALUE._col3}'
-' handleSkewJoin: false'
-' outputColumnNames: _col7'
-' Select Operator'
-' expressions:'
-' expr: _col7'
-' type: string'
-' outputColumnNames: _col3'
-' Select Operator'
-' expressions:'
-' expr: _col3'
-' type: string'
-' outputColumnNames: _col3'
-' Group By Operator'
-' aggregations:'
-' expr: sum(hash(_col3))'
-' bucketGroup: false'
-' mode: hash'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-' Stage: Stage-15'
-' Map Reduce Local Work'
-' Alias -> Map Local Tables:'
-' src5:src3:src1 '
-' Fetch Operator'
-' limit: -1'
-' Alias -> Map Local Operator Tree:'
-' src5:src3:src1 '
-' TableScan'
-' alias: src1'
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 {key} {value}'
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[key]]'
-' Position of Big Table: 1'
-''
-' Stage: Stage-10'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' src5:src3:src2 '
-' TableScan'
-' alias: src2'
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 {key} {value}'
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[key]]'
-' outputColumnNames: _col0, _col1'
-' Position of Big Table: 1'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: string'
-' expr: _col1'
-' type: string'
-' outputColumnNames: _col2, _col3'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' Local Work:'
-' Map Reduce Local Work'
-''
-' Stage: Stage-1'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' src5:src3:src1 '
-' TableScan'
-' alias: src1'
-' Reduce Output Operator'
-' key expressions:'
-' expr: key'
-' type: string'
-' sort order: +'
-' Map-reduce partition columns:'
-' expr: key'
-' type: string'
-' tag: 0'
-' value expressions:'
-' expr: key'
-' type: string'
-' expr: value'
-' type: string'
-' src5:src3:src2 '
-' TableScan'
-' alias: src2'
-' Reduce Output Operator'
-' key expressions:'
-' expr: key'
-' type: string'
-' sort order: +'
-' Map-reduce partition columns:'
-' expr: key'
-' type: string'
-' tag: 1'
-' Reduce Operator Tree:'
-' Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 {VALUE._col0} {VALUE._col1}'
-' 1 '
-' handleSkewJoin: false'
-' outputColumnNames: _col0, _col1'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: string'
-' expr: _col1'
-' type: string'
-' outputColumnNames: _col2, _col3'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-' Stage: Stage-0'
-' Fetch Operator'
-' limit: -1'
-''
-''
-405 rows selected
->>>
->>> SELECT sum(hash(src5.src1_value)) FROM (SELECT src3.*, src4.value as src4_value, src4.key as src4_key FROM src src4 JOIN (SELECT src2.*, src1.key as src1_key, src1.value as src1_value FROM src src1 JOIN src src2 ON src1.key = src2.key) src3 ON src3.src1_key = src4.key) src5;
-'_c0'
-'344337359100'
-1 row selected
->>> !record
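
The plan above is the auto-converted shape: Stage-11 and Stage-8 are Conditional Operators that pick at run time between the map-join branches (hash tables built by the Map Reduce Local Work stages, each marked "has a backup stage") and the reduce-side common joins Stage-1 and Stage-2. A minimal sketch that reproduces the same layout against the bundled src test table; the aliases t, s1, s2, s3 are illustrative, not taken from the test:

    set hive.auto.convert.join = true;
    EXPLAIN
    SELECT sum(hash(t.v))
    FROM (SELECT s1.value AS v, s1.key AS k
          FROM src s1 JOIN src s2 ON s1.key = s2.key) t
    JOIN src s3 ON t.k = s3.key;
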
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join23.q.out b/ql/src/test/results/beelinepositive/auto_join23.q.out
deleted file mode 100644
index 2e07a5c..0000000
--- a/ql/src/test/results/beelinepositive/auto_join23.q.out
+++ /dev/null
@@ -1,362 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join23.q.raw". Enter "record" with no arguments to stop it.
->>> !run !!{qFileDirectory}!!/auto_join23.q
->>> set hive.auto.convert.join = true;
-No rows affected
->>>
->>> explain
-SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (and (< (. (TOK_TABLE_OR_COL src1) key) 10) (< (. (TOK_TABLE_OR_COL src2) key) 10))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) value)))))'
-''
-'STAGE DEPENDENCIES:'
-' Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1'
-' Stage-7 has a backup stage: Stage-1'
-' Stage-4 depends on stages: Stage-7'
-' Stage-2 depends on stages: Stage-1, Stage-4, Stage-5'
-' Stage-8 has a backup stage: Stage-1'
-' Stage-5 depends on stages: Stage-8'
-' Stage-1'
-' Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-' Stage: Stage-6'
-' Conditional Operator'
-''
-' Stage: Stage-7'
-' Map Reduce Local Work'
-' Alias -> Map Local Tables:'
-' src2 '
-' Fetch Operator'
-' limit: -1'
-' Alias -> Map Local Operator Tree:'
-' src2 '
-' TableScan'
-' alias: src2'
-' Filter Operator'
-' predicate:'
-' expr: (key < 10)'
-' type: boolean'
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 {key} {value}'
-' 1 {key} {value}'
-' handleSkewJoin: false'
-' keys:'
-' 0 []'
-' 1 []'
-' Position of Big Table: 0'
-''
-' Stage: Stage-4'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' src1 '
-' TableScan'
-' alias: src1'
-' Filter Operator'
-' predicate:'
-' expr: (key < 10)'
-' type: boolean'
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 {key} {value}'
-' 1 {key} {value}'
-' handleSkewJoin: false'
-' keys:'
-' 0 []'
-' 1 []'
-' outputColumnNames: _col0, _col1, _col4, _col5'
-' Position of Big Table: 0'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: string'
-' expr: _col1'
-' type: string'
-' expr: _col4'
-' type: string'
-' expr: _col5'
-' type: string'
-' outputColumnNames: _col0, _col1, _col2, _col3'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' Local Work:'
-' Map Reduce Local Work'
-''
-' Stage: Stage-2'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' file:!!{hive.exec.scratchdir}!! '
-' Reduce Output Operator'
-' key expressions:'
-' expr: _col0'
-' type: string'
-' expr: _col1'
-' type: string'
-' expr: _col2'
-' type: string'
-' expr: _col3'
-' type: string'
-' sort order: ++++'
-' tag: -1'
-' value expressions:'
-' expr: _col0'
-' type: string'
-' expr: _col1'
-' type: string'
-' expr: _col2'
-' type: string'
-' expr: _col3'
-' type: string'
-' Reduce Operator Tree:'
-' Extract'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.TextInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-' Stage: Stage-8'
-' Map Reduce Local Work'
-' Alias -> Map Local Tables:'
-' src1 '
-' Fetch Operator'
-' limit: -1'
-' Alias -> Map Local Operator Tree:'
-' src1 '
-' TableScan'
-' alias: src1'
-' Filter Operator'
-' predicate:'
-' expr: (key < 10)'
-' type: boolean'
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 {key} {value}'
-' 1 {key} {value}'
-' handleSkewJoin: false'
-' keys:'
-' 0 []'
-' 1 []'
-' Position of Big Table: 1'
-''
-' Stage: Stage-5'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' src2 '
-' TableScan'
-' alias: src2'
-' Filter Operator'
-' predicate:'
-' expr: (key < 10)'
-' type: boolean'
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 {key} {value}'
-' 1 {key} {value}'
-' handleSkewJoin: false'
-' keys:'
-' 0 []'
-' 1 []'
-' outputColumnNames: _col0, _col1, _col4, _col5'
-' Position of Big Table: 1'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: string'
-' expr: _col1'
-' type: string'
-' expr: _col4'
-' type: string'
-' expr: _col5'
-' type: string'
-' outputColumnNames: _col0, _col1, _col2, _col3'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' Local Work:'
-' Map Reduce Local Work'
-''
-' Stage: Stage-1'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' src1 '
-' TableScan'
-' alias: src1'
-' Filter Operator'
-' predicate:'
-' expr: (key < 10)'
-' type: boolean'
-' Reduce Output Operator'
-' sort order: '
-' tag: 0'
-' value expressions:'
-' expr: key'
-' type: string'
-' expr: value'
-' type: string'
-' src2 '
-' TableScan'
-' alias: src2'
-' Filter Operator'
-' predicate:'
-' expr: (key < 10)'
-' type: boolean'
-' Reduce Output Operator'
-' sort order: '
-' tag: 1'
-' value expressions:'
-' expr: key'
-' type: string'
-' expr: value'
-' type: string'
-' Reduce Operator Tree:'
-' Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 {VALUE._col0} {VALUE._col1}'
-' 1 {VALUE._col0} {VALUE._col1}'
-' handleSkewJoin: false'
-' outputColumnNames: _col0, _col1, _col4, _col5'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: string'
-' expr: _col1'
-' type: string'
-' expr: _col4'
-' type: string'
-' expr: _col5'
-' type: string'
-' outputColumnNames: _col0, _col1, _col2, _col3'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-' Stage: Stage-0'
-' Fetch Operator'
-' limit: -1'
-''
-''
-248 rows selected
->>>
->>> SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value;
-'key','value','key','value'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','2','val_2'
-'0','val_0','2','val_2'
-'0','val_0','2','val_2'
-'0','val_0','4','val_4'
-'0','val_0','4','val_4'
-'0','val_0','4','val_4'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','8','val_8'
-'0','val_0','8','val_8'
-'0','val_0','8','val_8'
-'0','val_0','9','val_9'
-'0','val_0','9','val_9'
-'0','val_0','9','val_9'
-'2','val_2','0','val_0'
-'2','val_2','0','val_0'
-'2','val_2','0','val_0'
-'2','val_2','2','val_2'
-'2','val_2','4','val_4'
-'2','val_2','5','val_5'
-'2','val_2','5','val_5'
-'2','val_2','5','val_5'
-'2','val_2','8','val_8'
-'2','val_2','9','val_9'
-'4','val_4','0','val_0'
-'4','val_4','0','val_0'
-'4','val_4','0','val_0'
-'4','val_4','2','val_2'
-'4','val_4','4','val_4'
-'4','val_4','5','val_5'
-'4','val_4','5','val_5'
-'4','val_4','5','val_5'
-'4','val_4','8','val_8'
-'4','val_4','9','val_9'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','2','val_2'
-'5','val_5','2','val_2'
-'5','val_5','2','val_2'
-'5','val_5','4','val_4'
-'5','val_5','4','val_4'
-'5','val_5','4','val_4'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','8','val_8'
-'5','val_5','8','val_8'
-'5','val_5','8','val_8'
-'5','val_5','9','val_9'
-'5','val_5','9','val_9'
-'5','val_5','9','val_9'
-'8','val_8','0','val_0'
-'8','val_8','0','val_0'
-'8','val_8','0','val_0'
-'8','val_8','2','val_2'
-'8','val_8','4','val_4'
-'8','val_8','5','val_5'
-'8','val_8','5','val_5'
-'8','val_8','5','val_5'
-'8','val_8','8','val_8'
-'8','val_8','9','val_9'
-'9','val_9','0','val_0'
-'9','val_9','0','val_0'
-'9','val_9','0','val_0'
-'9','val_9','2','val_2'
-'9','val_9','4','val_4'
-'9','val_9','5','val_5'
-'9','val_9','5','val_5'
-'9','val_9','5','val_5'
-'9','val_9','8','val_8'
-'9','val_9','9','val_9'
-100 rows selected
->>> !record
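
Since the join in auto_join23 has no ON clause, both key lists in the plan are empty ('0 []', '1 []') and the filtered sides are cross-joined. Each side of src keeps exactly 10 rows with key < 10 (three 0s, one 2, one 4, three 5s, one 8, one 9), so the result is 10 * 10 = 100 rows, matching the output above. A quick cross-check, as a sketch against the same table:

    SELECT count(*) FROM src WHERE key < 10;   -- expect 10 per join side
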
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join24.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join24.q.out b/ql/src/test/results/beelinepositive/auto_join24.q.out
deleted file mode 100644
index 9d8ea5c..0000000
--- a/ql/src/test/results/beelinepositive/auto_join24.q.out
+++ /dev/null
@@ -1,249 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join24.q.raw". Enter "record" with no arguments to stop it.
->>> !run !!{qFileDirectory}!!/auto_join24.q
->>> set hive.auto.convert.join = true;
-No rows affected
->>>
->>> create table tst1(key STRING, cnt INT);
-No rows affected
->>>
->>> INSERT OVERWRITE TABLE tst1
-SELECT a.key, count(1) FROM src a group by a.key;
-'_col0','_col1'
-No rows selected
->>>
->>> explain
-SELECT sum(a.cnt) FROM tst1 a JOIN tst1 b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME tst1) a) (TOK_TABREF (TOK_TABNAME tst1) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (. (TOK_TABLE_OR_COL a) cnt))))))'
-''
-'STAGE DEPENDENCIES:'
-' Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1'
-' Stage-7 has a backup stage: Stage-1'
-' Stage-4 depends on stages: Stage-7'
-' Stage-2 depends on stages: Stage-1, Stage-4, Stage-5'
-' Stage-8 has a backup stage: Stage-1'
-' Stage-5 depends on stages: Stage-8'
-' Stage-1'
-' Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-' Stage: Stage-6'
-' Conditional Operator'
-''
-' Stage: Stage-7'
-' Map Reduce Local Work'
-' Alias -> Map Local Tables:'
-' b '
-' Fetch Operator'
-' limit: -1'
-' Alias -> Map Local Operator Tree:'
-' b '
-' TableScan'
-' alias: b'
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 {cnt}'
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[key]]'
-' Position of Big Table: 0'
-''
-' Stage: Stage-4'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' a '
-' TableScan'
-' alias: a'
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 {cnt}'
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[key]]'
-' outputColumnNames: _col1'
-' Position of Big Table: 0'
-' Select Operator'
-' expressions:'
-' expr: _col1'
-' type: int'
-' outputColumnNames: _col1'
-' Group By Operator'
-' aggregations:'
-' expr: sum(_col1)'
-' bucketGroup: false'
-' mode: hash'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' Local Work:'
-' Map Reduce Local Work'
-''
-' Stage: Stage-2'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' file:!!{hive.exec.scratchdir}!! '
-' Reduce Output Operator'
-' sort order: '
-' tag: -1'
-' value expressions:'
-' expr: _col0'
-' type: bigint'
-' Reduce Operator Tree:'
-' Group By Operator'
-' aggregations:'
-' expr: sum(VALUE._col0)'
-' bucketGroup: false'
-' mode: mergepartial'
-' outputColumnNames: _col0'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: bigint'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.TextInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-' Stage: Stage-8'
-' Map Reduce Local Work'
-' Alias -> Map Local Tables:'
-' a '
-' Fetch Operator'
-' limit: -1'
-' Alias -> Map Local Operator Tree:'
-' a '
-' TableScan'
-' alias: a'
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 {cnt}'
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[key]]'
-' Position of Big Table: 1'
-''
-' Stage: Stage-5'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' b '
-' TableScan'
-' alias: b'
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 {cnt}'
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[key]]'
-' outputColumnNames: _col1'
-' Position of Big Table: 1'
-' Select Operator'
-' expressions:'
-' expr: _col1'
-' type: int'
-' outputColumnNames: _col1'
-' Group By Operator'
-' aggregations:'
-' expr: sum(_col1)'
-' bucketGroup: false'
-' mode: hash'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' Local Work:'
-' Map Reduce Local Work'
-''
-' Stage: Stage-1'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' a '
-' TableScan'
-' alias: a'
-' Reduce Output Operator'
-' key expressions:'
-' expr: key'
-' type: string'
-' sort order: +'
-' Map-reduce partition columns:'
-' expr: key'
-' type: string'
-' tag: 0'
-' value expressions:'
-' expr: cnt'
-' type: int'
-' b '
-' TableScan'
-' alias: b'
-' Reduce Output Operator'
-' key expressions:'
-' expr: key'
-' type: string'
-' sort order: +'
-' Map-reduce partition columns:'
-' expr: key'
-' type: string'
-' tag: 1'
-' Reduce Operator Tree:'
-' Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 {VALUE._col1}'
-' 1 '
-' handleSkewJoin: false'
-' outputColumnNames: _col1'
-' Select Operator'
-' expressions:'
-' expr: _col1'
-' type: int'
-' outputColumnNames: _col1'
-' Group By Operator'
-' aggregations:'
-' expr: sum(_col1)'
-' bucketGroup: false'
-' mode: hash'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-' Stage: Stage-0'
-' Fetch Operator'
-' limit: -1'
-''
-''
-224 rows selected
->>>
->>> SELECT sum(a.cnt) FROM tst1 a JOIN tst1 b ON a.key = b.key;
-'_c0'
-'500'
-1 row selected
->>>
->>>
->>> !record
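
The '500' above follows from how tst1 is built: one row per distinct src key, with cnt holding that key's frequency. The self-join on key therefore matches every tst1 row exactly once, and sum(a.cnt) collapses back to the total row count of src. A sketch of the same identity:

    SELECT sum(cnt) FROM tst1;      -- 500
    SELECT count(1) FROM src;       -- 500 as well
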
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join25.q.out b/ql/src/test/results/beelinepositive/auto_join25.q.out
deleted file mode 100644
index 450db9a..0000000
--- a/ql/src/test/results/beelinepositive/auto_join25.q.out
+++ /dev/null
@@ -1,52 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join25.q.raw". Enter "record" with no arguments to stop it.
->>> !run !!{qFileDirectory}!!/auto_join25.q
->>> set hive.auto.convert.join = true;
-No rows affected
->>> set hive.mapjoin.localtask.max.memory.usage = 0.0001;
-No rows affected
->>> set hive.mapjoin.check.memory.rows = 2;
-No rows affected
->>>
->>> CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected
->>>
->>> FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value
-where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11');
-'_col0','_col1'
-No rows selected
->>>
->>> SELECT sum(hash(dest1.key,dest1.value)) FROM dest1;
-'_c0'
-'407444119660'
-1 row selected
->>>
->>>
->>>
->>> CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected
->>>
->>> FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key)
-INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value;
-'_col0','_col1'
-No rows selected
->>>
->>> SELECT sum(hash(dest_j2.key,dest_j2.value)) FROM dest_j2;
-'_c0'
-'33815990627'
-1 row selected
->>>
->>> CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected
->>>
->>> FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;
-'_col0','_col1'
-No rows selected
->>>
->>> SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1;
-'_c0'
-'101861029915'
-1 row selected
->>>
->>> !record
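
auto_join25 exercises the runtime fallback rather than the plan shape: the local hash-table task is capped at a deliberately tiny memory budget and checked every two rows, so the map-join attempt aborts and the conditional task reruns the backup common-join stage, with the checksums still matching the reduce-side results. The knobs involved, as set in the test, plus a join in the same pattern as its first INSERT (a sketch; s1, s2 are illustrative aliases):

    set hive.auto.convert.join = true;
    set hive.mapjoin.localtask.max.memory.usage = 0.0001;
    set hive.mapjoin.check.memory.rows = 2;
    SELECT sum(hash(s1.key, s2.value))
    FROM src s1 JOIN src s2 ON s1.key = s2.key;
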
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join26.q.out b/ql/src/test/results/beelinepositive/auto_join26.q.out
deleted file mode 100644
index ef212b8..0000000
--- a/ql/src/test/results/beelinepositive/auto_join26.q.out
+++ /dev/null
@@ -1,299 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join26.q.raw". Enter "record" with no arguments to stop it.
->>> !run !!{qFileDirectory}!!/auto_join26.q
->>> CREATE TABLE dest_j1(key INT, cnt INT);
-No rows affected
->>> set hive.auto.convert.join = true;
-No rows affected
->>> EXPLAIN
-INSERT OVERWRITE TABLE dest_j1
-SELECT x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src1) x) (TOK_TABREF (TOK_TABNAME src) y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL y) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_j1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL x) key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL x) key))))'
-''
-'STAGE DEPENDENCIES:'
-' Stage-7 is a root stage , consists of Stage-8, Stage-9, Stage-1'
-' Stage-8 has a backup stage: Stage-1'
-' Stage-5 depends on stages: Stage-8'
-' Stage-2 depends on stages: Stage-1, Stage-5, Stage-6'
-' Stage-0 depends on stages: Stage-2'
-' Stage-3 depends on stages: Stage-0'
-' Stage-9 has a backup stage: Stage-1'
-' Stage-6 depends on stages: Stage-9'
-' Stage-1'
-''
-'STAGE PLANS:'
-' Stage: Stage-7'
-' Conditional Operator'
-''
-' Stage: Stage-8'
-' Map Reduce Local Work'
-' Alias -> Map Local Tables:'
-' y '
-' Fetch Operator'
-' limit: -1'
-' Alias -> Map Local Operator Tree:'
-' y '
-' TableScan'
-' alias: y'
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 {key}'
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[key]]'
-' Position of Big Table: 0'
-''
-' Stage: Stage-5'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' x '
-' TableScan'
-' alias: x'
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 {key}'
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[key]]'
-' outputColumnNames: _col0'
-' Position of Big Table: 0'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: string'
-' outputColumnNames: _col0'
-' Group By Operator'
-' aggregations:'
-' expr: count(1)'
-' bucketGroup: false'
-' keys:'
-' expr: _col0'
-' type: string'
-' mode: hash'
-' outputColumnNames: _col0, _col1'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' Local Work:'
-' Map Reduce Local Work'
-''
-' Stage: Stage-2'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' file:!!{hive.exec.scratchdir}!! '
-' Reduce Output Operator'
-' key expressions:'
-' expr: _col0'
-' type: string'
-' sort order: +'
-' Map-reduce partition columns:'
-' expr: _col0'
-' type: string'
-' tag: -1'
-' value expressions:'
-' expr: _col1'
-' type: bigint'
-' Reduce Operator Tree:'
-' Group By Operator'
-' aggregations:'
-' expr: count(VALUE._col0)'
-' bucketGroup: false'
-' keys:'
-' expr: KEY._col0'
-' type: string'
-' mode: mergepartial'
-' outputColumnNames: _col0, _col1'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: string'
-' expr: _col1'
-' type: bigint'
-' outputColumnNames: _col0, _col1'
-' Select Operator'
-' expressions:'
-' expr: UDFToInteger(_col0)'
-' type: int'
-' expr: UDFToInteger(_col1)'
-' type: int'
-' outputColumnNames: _col0, _col1'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 1'
-' table:'
-' input format: org.apache.hadoop.mapred.TextInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-' name: auto_join26.dest_j1'
-''
-' Stage: Stage-0'
-' Move Operator'
-' tables:'
-' replace: true'
-' table:'
-' input format: org.apache.hadoop.mapred.TextInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-' name: auto_join26.dest_j1'
-''
-' Stage: Stage-3'
-' Stats-Aggr Operator'
-''
-' Stage: Stage-9'
-' Map Reduce Local Work'
-' Alias -> Map Local Tables:'
-' x '
-' Fetch Operator'
-' limit: -1'
-' Alias -> Map Local Operator Tree:'
-' x '
-' TableScan'
-' alias: x'
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 {key}'
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[key]]'
-' Position of Big Table: 1'
-''
-' Stage: Stage-6'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' y '
-' TableScan'
-' alias: y'
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 {key}'
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[key]]'
-' 1 [Column[key]]'
-' outputColumnNames: _col0'
-' Position of Big Table: 1'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: string'
-' outputColumnNames: _col0'
-' Group By Operator'
-' aggregations:'
-' expr: count(1)'
-' bucketGroup: false'
-' keys:'
-' expr: _col0'
-' type: string'
-' mode: hash'
-' outputColumnNames: _col0, _col1'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' Local Work:'
-' Map Reduce Local Work'
-''
-' Stage: Stage-1'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' x '
-' TableScan'
-' alias: x'
-' Reduce Output Operator'
-' key expressions:'
-' expr: key'
-' type: string'
-' sort order: +'
-' Map-reduce partition columns:'
-' expr: key'
-' type: string'
-' tag: 0'
-' value expressions:'
-' expr: key'
-' type: string'
-' y '
-' TableScan'
-' alias: y'
-' Reduce Output Operator'
-' key expressions:'
-' expr: key'
-' type: string'
-' sort order: +'
-' Map-reduce partition columns:'
-' expr: key'
-' type: string'
-' tag: 1'
-' Reduce Operator Tree:'
-' Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 {VALUE._col0}'
-' 1 '
-' handleSkewJoin: false'
-' outputColumnNames: _col0'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: string'
-' outputColumnNames: _col0'
-' Group By Operator'
-' aggregations:'
-' expr: count(1)'
-' bucketGroup: false'
-' keys:'
-' expr: _col0'
-' type: string'
-' mode: hash'
-' outputColumnNames: _col0, _col1'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-''
-263 rows selected
->>>
->>> INSERT OVERWRITE TABLE dest_j1
-SELECT x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key;
-'_col0','_col1'
-No rows selected
->>>
->>> select * from dest_j1 x order by x.key;
-'key','cnt'
-'66','1'
-'98','2'
-'128','3'
-'146','2'
-'150','1'
-'213','2'
-'224','2'
-'238','2'
-'255','2'
-'273','3'
-'278','2'
-'311','3'
-'369','3'
-'401','5'
-'406','4'
-15 rows selected
->>> !record
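
In auto_join26 the converted plan fuses the map join with a map-side hash aggregation in a single task (the Map Join Operator feeds a Group By Operator with mode: hash), and Stage-2 merges the partials (mode: mergepartial) before the Move into dest_j1. The driving query, runnable as-is on the test tables:

    set hive.auto.convert.join = true;
    EXPLAIN
    SELECT x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) GROUP BY x.key;
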
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join27.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join27.q.out b/ql/src/test/results/beelinepositive/auto_join27.q.out
deleted file mode 100644
index 862f2da..0000000
--- a/ql/src/test/results/beelinepositive/auto_join27.q.out
+++ /dev/null
@@ -1,421 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join27.q.raw". Enter "record" with no arguments to stop it.
->>> !run !!{qFileDirectory}!!/auto_join27.q
->>> set hive.auto.convert.join = true;
-No rows affected
->>>
->>> explain
-SELECT count(1)
-FROM
-(
-SELECT src.key, src.value from src
-UNION ALL
-SELECT DISTINCT src.key, src.value from src
-) src_12
-JOIN
-(SELECT src.key as k, src.value as v from src) src3
-ON src_12.key = src3.k AND src3.k < 200;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECTDI (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value)))))) src_12) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) k) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) v)))) src3) (AND (= (. (TOK_TABLE_OR_COL src_12) key) (. (TOK_TABLE_OR_COL src3) k)) (< (. (TOK_TABLE_OR_COL src3) k) 200)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)))))'
-''
-'STAGE DEPENDENCIES:'
-' Stage-1 is a root stage'
-' Stage-8 depends on stages: Stage-1 , consists of Stage-9, Stage-10, Stage-2'
-' Stage-9 has a backup stage: Stage-2'
-' Stage-6 depends on stages: Stage-9'
-' Stage-3 depends on stages: Stage-2, Stage-6, Stage-7'
-' Stage-10 has a backup stage: Stage-2'
-' Stage-7 depends on stages: Stage-10'
-' Stage-2'
-' Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-' Stage: Stage-1'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' null-subquery2:src_12-subquery2:src '
-' TableScan'
-' alias: src'
-' Filter Operator'
-' predicate:'
-' expr: (key < 200)'
-' type: boolean'
-' Select Operator'
-' expressions:'
-' expr: key'
-' type: string'
-' expr: value'
-' type: string'
-' outputColumnNames: key, value'
-' Group By Operator'
-' bucketGroup: false'
-' keys:'
-' expr: key'
-' type: string'
-' expr: value'
-' type: string'
-' mode: hash'
-' outputColumnNames: _col0, _col1'
-' Reduce Output Operator'
-' key expressions:'
-' expr: _col0'
-' type: string'
-' expr: _col1'
-' type: string'
-' sort order: ++'
-' Map-reduce partition columns:'
-' expr: _col0'
-' type: string'
-' expr: _col1'
-' type: string'
-' tag: -1'
-' Reduce Operator Tree:'
-' Group By Operator'
-' bucketGroup: false'
-' keys:'
-' expr: KEY._col0'
-' type: string'
-' expr: KEY._col1'
-' type: string'
-' mode: mergepartial'
-' outputColumnNames: _col0, _col1'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: string'
-' expr: _col1'
-' type: string'
-' outputColumnNames: _col0, _col1'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-' Stage: Stage-8'
-' Conditional Operator'
-''
-' Stage: Stage-9'
-' Map Reduce Local Work'
-' Alias -> Map Local Tables:'
-' src3:src '
-' Fetch Operator'
-' limit: -1'
-' Alias -> Map Local Operator Tree:'
-' src3:src '
-' TableScan'
-' alias: src'
-' Filter Operator'
-' predicate:'
-' expr: (key < 200)'
-' type: boolean'
-' Select Operator'
-' expressions:'
-' expr: key'
-' type: string'
-' outputColumnNames: _col0'
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 '
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[_col0]]'
-' 1 [Column[_col0]]'
-' Position of Big Table: 0'
-''
-' Stage: Stage-6'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' file:!!{hive.exec.scratchdir}!! '
-' TableScan'
-' Union'
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 '
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[_col0]]'
-' 1 [Column[_col0]]'
-' Position of Big Table: 0'
-' Select Operator'
-' Group By Operator'
-' aggregations:'
-' expr: count(1)'
-' bucketGroup: false'
-' mode: hash'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' null-subquery1:src_12-subquery1:src '
-' TableScan'
-' alias: src'
-' Filter Operator'
-' predicate:'
-' expr: (key < 200)'
-' type: boolean'
-' Select Operator'
-' expressions:'
-' expr: key'
-' type: string'
-' expr: value'
-' type: string'
-' outputColumnNames: _col0, _col1'
-' Union'
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 '
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[_col0]]'
-' 1 [Column[_col0]]'
-' Position of Big Table: 0'
-' Select Operator'
-' Group By Operator'
-' aggregations:'
-' expr: count(1)'
-' bucketGroup: false'
-' mode: hash'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' Local Work:'
-' Map Reduce Local Work'
-''
-' Stage: Stage-3'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' file:!!{hive.exec.scratchdir}!! '
-' Reduce Output Operator'
-' sort order: '
-' tag: -1'
-' value expressions:'
-' expr: _col0'
-' type: bigint'
-' Reduce Operator Tree:'
-' Group By Operator'
-' aggregations:'
-' expr: count(VALUE._col0)'
-' bucketGroup: false'
-' mode: mergepartial'
-' outputColumnNames: _col0'
-' Select Operator'
-' expressions:'
-' expr: _col0'
-' type: bigint'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.TextInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-' Stage: Stage-10'
-' Map Reduce Local Work'
-' Alias -> Map Local Tables:'
-' file:!!{hive.exec.scratchdir}!! '
-' Fetch Operator'
-' limit: -1'
-' null-subquery1:src_12-subquery1:src '
-' Fetch Operator'
-' limit: -1'
-' Alias -> Map Local Operator Tree:'
-' file:!!{hive.exec.scratchdir}!! '
-' TableScan'
-' Union'
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 '
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[_col0]]'
-' 1 [Column[_col0]]'
-' Position of Big Table: 1'
-' null-subquery1:src_12-subquery1:src '
-' TableScan'
-' alias: src'
-' Filter Operator'
-' predicate:'
-' expr: (key < 200)'
-' type: boolean'
-' Select Operator'
-' expressions:'
-' expr: key'
-' type: string'
-' expr: value'
-' type: string'
-' outputColumnNames: _col0, _col1'
-' Union'
-' HashTable Sink Operator'
-' condition expressions:'
-' 0 '
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[_col0]]'
-' 1 [Column[_col0]]'
-' Position of Big Table: 1'
-''
-' Stage: Stage-7'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' src3:src '
-' TableScan'
-' alias: src'
-' Filter Operator'
-' predicate:'
-' expr: (key < 200)'
-' type: boolean'
-' Select Operator'
-' expressions:'
-' expr: key'
-' type: string'
-' outputColumnNames: _col0'
-' Map Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 '
-' 1 '
-' handleSkewJoin: false'
-' keys:'
-' 0 [Column[_col0]]'
-' 1 [Column[_col0]]'
-' Position of Big Table: 1'
-' Select Operator'
-' Group By Operator'
-' aggregations:'
-' expr: count(1)'
-' bucketGroup: false'
-' mode: hash'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-' Local Work:'
-' Map Reduce Local Work'
-''
-' Stage: Stage-2'
-' Map Reduce'
-' Alias -> Map Operator Tree:'
-' file:!!{hive.exec.scratchdir}!! '
-' TableScan'
-' Union'
-' Reduce Output Operator'
-' key expressions:'
-' expr: _col0'
-' type: string'
-' sort order: +'
-' Map-reduce partition columns:'
-' expr: _col0'
-' type: string'
-' tag: 0'
-' null-subquery1:src_12-subquery1:src '
-' TableScan'
-' alias: src'
-' Filter Operator'
-' predicate:'
-' expr: (key < 200)'
-' type: boolean'
-' Select Operator'
-' expressions:'
-' expr: key'
-' type: string'
-' expr: value'
-' type: string'
-' outputColumnNames: _col0, _col1'
-' Union'
-' Reduce Output Operator'
-' key expressions:'
-' expr: _col0'
-' type: string'
-' sort order: +'
-' Map-reduce partition columns:'
-' expr: _col0'
-' type: string'
-' tag: 0'
-' src3:src '
-' TableScan'
-' alias: src'
-' Filter Operator'
-' predicate:'
-' expr: (key < 200)'
-' type: boolean'
-' Select Operator'
-' expressions:'
-' expr: key'
-' type: string'
-' outputColumnNames: _col0'
-' Reduce Output Operator'
-' key expressions:'
-' expr: _col0'
-' type: string'
-' sort order: +'
-' Map-reduce partition columns:'
-' expr: _col0'
-' type: string'
-' tag: 1'
-' Reduce Operator Tree:'
-' Join Operator'
-' condition map:'
-' Inner Join 0 to 1'
-' condition expressions:'
-' 0 '
-' 1 '
-' handleSkewJoin: false'
-' Select Operator'
-' Group By Operator'
-' aggregations:'
-' expr: count(1)'
-' bucketGroup: false'
-' mode: hash'
-' outputColumnNames: _col0'
-' File Output Operator'
-' compressed: false'
-' GlobalTableId: 0'
-' table:'
-' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-' Stage: Stage-0'
-' Fetch Operator'
-' limit: -1'
-''
-''
-387 rows selected
->>>
->>>
->>> SELECT count(1)
-FROM
-(
-SELECT src.key, src.value from src
-UNION ALL
-SELECT DISTINCT src.key, src.value from src
-) src_12
-JOIN
-(SELECT src.key as k, src.value as v from src) src3
-ON src_12.key = src3.k AND src3.k < 200;
-'_c0'
-'548'
-1 row selected
->>> !record
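
In auto_join27 the UNION ALL forces Stage-1 to materialize the DISTINCT branch first, so the conditional map join treats the union output (the file:!!{hive.exec.scratchdir}!! alias) as one join input. To see only the common-join shape (Stage-2 above) for comparison, conversion can be switched off; a sketch:

    set hive.auto.convert.join = false;
    EXPLAIN
    SELECT count(1)
    FROM (SELECT src.key, src.value FROM src
          UNION ALL
          SELECT DISTINCT src.key, src.value FROM src) src_12
    JOIN (SELECT src.key AS k, src.value AS v FROM src) src3
    ON src_12.key = src3.k AND src3.k < 200;
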