http://git-wip-us.apache.org/repos/asf/hive/blob/3bbc24d2/ql/src/test/results/clientpositive/llap/reduce_deduplicate_distinct.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/llap/reduce_deduplicate_distinct.q.out b/ql/src/test/results/clientpositive/llap/reduce_deduplicate_distinct.q.out index 6864da4..2955ee6 100644 --- a/ql/src/test/results/clientpositive/llap/reduce_deduplicate_distinct.q.out +++ b/ql/src/test/results/clientpositive/llap/reduce_deduplicate_distinct.q.out @@ -39,22 +39,22 @@ STAGE PLANS: Map Operator Tree: TableScan alias: count_distinct_test - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: int), key (type: int), name (type: int) outputColumnNames: id, key, name - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT key), count(DISTINCT name) keys: id (type: int), key (type: int), name (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: no inputs Reducer 2 @@ -65,10 +65,10 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -118,21 +118,21 @@ STAGE PLANS: Map Operator Tree: TableScan alias: count_distinct_test - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: int), key (type: int), name (type: int) outputColumnNames: id, key, name - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: id (type: int), key (type: int), name (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: int) - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column 
stats: NONE + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: no inputs Reducer 2 @@ -142,18 +142,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT _col1), count(DISTINCT _col2) keys: _col0 (type: int), _col1 (type: int), _col2 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Execution mode: llap Reduce Operator Tree: @@ -162,10 +162,10 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -215,21 +215,21 @@ STAGE PLANS: Map Operator Tree: TableScan alias: count_distinct_test - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: int), key (type: int), name (type: int) outputColumnNames: id, key, name - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: id (type: int), key (type: int), name (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: int) - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: no inputs Reducer 2 @@ -239,18 +239,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT _col2), count(DISTINCT _col1) keys: _col0 (type: int), _col2 (type: 
int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Execution mode: llap Reduce Operator Tree: @@ -259,10 +259,10 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -312,21 +312,21 @@ STAGE PLANS: Map Operator Tree: TableScan alias: count_distinct_test - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: int), key (type: int), name (type: int) outputColumnNames: id, key, name - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: id (type: int), key (type: int), name (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: int) - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: no inputs Reducer 2 @@ -336,18 +336,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT _col1), count(DISTINCT _col2) keys: _col0 (type: int), _col1 (type: int), _col2 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Execution 
mode: llap Reduce Operator Tree: @@ -356,10 +356,10 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -409,21 +409,21 @@ STAGE PLANS: Map Operator Tree: TableScan alias: count_distinct_test - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: int), key (type: int), name (type: int) outputColumnNames: id, key, name - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: id (type: int), key (type: int), name (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: int) - Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: no inputs Reducer 2 @@ -433,18 +433,18 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(DISTINCT _col2), count(DISTINCT _col1) keys: _col0 (type: int), _col2 (type: int), _col1 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Execution mode: llap Reduce Operator Tree: @@ -453,10 +453,10 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat 
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
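For context, the Group By Operators in the plans above (keys id, key, name on the count_distinct_test table, with aggregations count(DISTINCT key) and count(DISTINCT name)) correspond to a query shape roughly like the sketch below. The DDL and query are an assumption reconstructed from the column names and aggregations shown in the plan, not the actual contents of reduce_deduplicate_distinct.q:

-- Hypothetical sketch of the query shape implied by the plans above;
-- table name and column names/types are taken from the plan, the rest is assumed.
CREATE TABLE count_distinct_test (id INT, key INT, name INT);

-- Matches the plan's count(DISTINCT key), count(DISTINCT name) keyed on id.
-- The later plans in this file first deduplicate (id, key, name) in one stage
-- and then aggregate the distinct counts in a second reducer.
SELECT id, count(DISTINCT key), count(DISTINCT name)
FROM count_distinct_test
GROUP BY id;

The diff itself changes only the Statistics lines: Column stats flip from NONE to COMPLETE and the Num rows / Data size estimates shrink accordingly, which appears to reflect column statistics now being available to the planner rather than any change in the query or operator tree.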
http://git-wip-us.apache.org/repos/asf/hive/blob/3bbc24d2/ql/src/test/results/clientpositive/llap/sample1.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/llap/sample1.q.out b/ql/src/test/results/clientpositive/llap/sample1.q.out index 5b69bb5..3458ee2 100644 --- a/ql/src/test/results/clientpositive/llap/sample1.q.out +++ b/ql/src/test/results/clientpositive/llap/sample1.q.out @@ -26,6 +26,9 @@ STAGE PLANS: Stage: Stage-1 Tez #### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### Vertices: Map 1 Map Operator Tree: @@ -73,6 +76,22 @@ STAGE PLANS: TotalFiles: 1 GatherStats: true MultiFileSpray: false + Select Operator + expressions: _col0 (type: int), _col1 (type: string), '2008-04-08' (type: string), '11' (type: string) + outputColumnNames: key, value, dt, hr + Statistics: Num rows: 250 Data size: 68750 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll'), compute_stats(dt, 'hll'), compute_stats(hr, 'hll') + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: COMPLETE + tag: -1 + value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>) + auto parallelism: false Execution mode: llap LLAP IO: no inputs Path -> Alias: @@ -128,6 +147,37 @@ STAGE PLANS: name: default.srcpart Truncated Path -> Alias: /srcpart/ds=2008-04-08/hr=11 [s] + Reducer 2 + Execution mode: llap + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>:struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>:struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>:struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary> + escape.delim \ + hive.serialization.extend.additional.nesting.levels true + 
serialization.escape.crlf true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Stage: Stage-2 Dependency Collection @@ -164,6 +214,11 @@ STAGE PLANS: Stats Work Basic Stats Work: #### A masked pattern was here #### + Column Stats Desc: + Columns: key, value, dt, hr + Column Types: int, string, string, string + Table: default.dest1 + Is Table Level Stats: true PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s http://git-wip-us.apache.org/repos/asf/hive/blob/3bbc24d2/ql/src/test/results/clientpositive/llap/sample10.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/llap/sample10.q.out b/ql/src/test/results/clientpositive/llap/sample10.q.out index f69dc3d..8552a3a 100644 --- a/ql/src/test/results/clientpositive/llap/sample10.q.out +++ b/ql/src/test/results/clientpositive/llap/sample10.q.out @@ -56,29 +56,29 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpartbucket - Statistics: Num rows: 40 Data size: 14776 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 40 Data size: 10760 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Filter Operator isSamplingPred: true predicate: (((hash(key) & 2147483647) % 4) = 0) (type: boolean) sampleDesc: BUCKET 1 OUT OF 4 - Statistics: Num rows: 20 Data size: 7360 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 20 Data size: 5380 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ds (type: string) outputColumnNames: ds - Statistics: Num rows: 20 Data size: 7360 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 20 Data size: 5380 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) keys: ds (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) null sort order: a sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 value expressions: _col1 (type: bigint) auto parallelism: true @@ -96,7 +96,7 @@ STAGE PLANS: ds 2008-04-08 hr 11 properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} bucket_count 4 bucket_field_name key column.name.delimiter , @@ -146,7 +146,7 @@ STAGE PLANS: ds 2008-04-08 hr 12 properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} bucket_count 4 bucket_field_name key column.name.delimiter , @@ -196,7 +196,7 @@ STAGE PLANS: ds 2008-04-09 hr 11 properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} + COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} bucket_count 4 bucket_field_name key column.name.delimiter , @@ -246,7 +246,7 @@ STAGE PLANS: ds 2008-04-09 hr 12 properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} + COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}} bucket_count 4 bucket_field_name key column.name.delimiter , @@ -301,12 +301,12 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) null sort order: a sort order: + - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 value expressions: _col1 (type: bigint) auto parallelism: false @@ -317,13 +317,13 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat http://git-wip-us.apache.org/repos/asf/hive/blob/3bbc24d2/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part.q.out index 72b0f67..ccfac74 100644 --- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part.q.out +++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part.q.out @@ -77,14 +77,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_add_int_permute_select - Statistics: Num rows: 2 Data size: 586 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: insert_num (type: int), part (type: int), a (type: int), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 392 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 392 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -206,14 +206,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_add_int_string_permute_select - Statistics: Num rows: 2 Data size: 674 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: insert_num (type: int), part (type: int), a (type: int), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 392 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column 
stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 392 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -393,14 +393,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_string_group_double - Statistics: Num rows: 5 Data size: 3190 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 500 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: insert_num (type: int), part (type: int), c1 (type: double), c2 (type: double), c3 (type: double), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 5 Data size: 1080 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 500 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 1080 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 500 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -710,14 +710,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_numeric_group_string_group_multi_ints_string_group - Statistics: Num rows: 6 Data size: 19151 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1140 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: string), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(50)), c8 (type: char(50)), c9 (type: char(5)), c10 (type: char(5)), c11 (type: char(5)), c12 (type: char(5)), c13 (type: varchar(50)), c14 (type: varchar(50)), c15 (type: varchar(50)), c16 (type: varchar(50)), c17 (type: varchar(5)), c18 (type: varchar(5)), c19 (type: varchar(5)), c20 (type: varchar(5)), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22 - Statistics: Num rows: 6 Data size: 16272 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1140 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 16272 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1140 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -893,14 +893,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_numeric_group_string_group_floating_string_group - Statistics: Num rows: 6 Data size: 17080 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: insert_num (type: int), part (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: 
varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17 - Statistics: Num rows: 6 Data size: 12564 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 12564 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1062,14 +1062,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_string_group_string_group_string - Statistics: Num rows: 6 Data size: 15466 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 2712 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: insert_num (type: int), part (type: int), c1 (type: char(50)), c2 (type: char(9)), c3 (type: varchar(50)), c4 (type: char(9)), c5 (type: varchar(50)), c6 (type: varchar(9)), c7 (type: string), c8 (type: char(50)), c9 (type: char(9)), c10 (type: string), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 - Statistics: Num rows: 6 Data size: 8808 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 2712 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 8808 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 2712 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1259,14 +1259,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_lower_to_higher_numeric_group_tinyint_to_bigint - Statistics: Num rows: 6 Data size: 5739 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: insert_num (type: int), part (type: int), c1 (type: smallint), c2 (type: int), c3 (type: bigint), c4 (type: decimal(38,18)), c5 (type: float), c6 (type: double), c7 (type: int), c8 (type: bigint), c9 (type: decimal(38,18)), c10 (type: float), c11 (type: double), c12 (type: bigint), c13 (type: decimal(38,18)), c14 (type: float), c15 (type: double), c16 (type: decimal(38,18)), c17 (type: float), c18 (type: double), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 - Statistics: Num rows: 6 Data size: 4344 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 4344 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1398,14 +1398,14 @@ STAGE PLANS: Map Operator Tree: TableScan 
alias: part_change_lower_to_higher_numeric_group_decimal_to_float - Statistics: Num rows: 6 Data size: 2771 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: insert_num (type: int), part (type: int), c1 (type: float), c2 (type: double), c3 (type: double), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 6 Data size: 1272 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 1272 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat http://git-wip-us.apache.org/repos/asf/hive/blob/3bbc24d2/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_complex.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_complex.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_complex.q.out index 76ea043..e86439b 100644 --- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_complex.q.out +++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_complex.q.out @@ -431,14 +431,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_add_various_various_struct2 - Statistics: Num rows: 8 Data size: 26640 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 800 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: insert_num (type: int), part (type: int), b (type: string), s2 (type: struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 8 Data size: 21760 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 800 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 8 Data size: 21760 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 800 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat http://git-wip-us.apache.org/repos/asf/hive/blob/3bbc24d2/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive.q.out index 63a8a34..eb9befc 100644 --- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive.q.out +++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_part_all_primitive.q.out @@ -266,14 +266,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_various_various_boolean_to_bigint - Statistics: Num rows: 10 Data size: 16128 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 1228 Basic 
stats: COMPLETE Column stats: PARTIAL Select Operator expressions: insert_num (type: int), part (type: int), c1 (type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5 (type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9 (type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint), c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type: tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20 (type: tinyint), c21 (type: smallint), c22 (type: smallint), c23 (type: smallint), c24 (type: smallint), c25 (type: smallint), c26 (type: smallint), c27 (type: smallint), c28 (type: smallint), c29 (type: smallint), c30 (type: smallint), c31 (type: smallint), c32 (type: int), c33 (type: int), c34 (type: int), c35 (type: int), c36 (type: int), c37 (type: int), c38 (type: int), c39 (type: int), c40 (type: int), c41 (type: int), c42 (type: int), c43 (type: bigint), c44 (type: bigint), c45 (type: bigint), c46 (type: bigint), c4 7 (type: bigint), c48 (type: bigint), c49 (type: bigint), c50 (type: bigint), c51 (type: bigint), c52 (type: bigint), c53 (type: bigint), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, _col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54, _col55 - Statistics: Num rows: 10 Data size: 4480 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 10 Data size: 4480 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -491,14 +491,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_various_various_decimal_to_double - Statistics: Num rows: 6 Data size: 17607 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: insert_num (type: int), part (type: int), c1 (type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)), c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type: decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9 (type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)), c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16 (type: float), c17 (type: float), c18 (type: float), c19 (type: float), c20 (type: float), c21 (type: float), c22 (type: float), c23 (type: double), c24 (type: double), c25 (type: double), c26 (type: double), c27 (type: double), c28 (type: double), c29 (type: double), c30 (type: double), c31 (type: double), c32 (type: double), c33 (type: double), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, 
_col33, _col34, _col35 - Statistics: Num rows: 6 Data size: 9336 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 9336 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -769,14 +769,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_various_various_date - Statistics: Num rows: 6 Data size: 4916 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: insert_num (type: int), part (type: int), c1 (type: date), c2 (type: date), c3 (type: date), c4 (type: date), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 6 Data size: 2496 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 2496 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -981,14 +981,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_same_type_different_params - Statistics: Num rows: 13 Data size: 19409 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 13 Data size: 9347 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: insert_num (type: int), part (type: int), c1 (type: char(8)), c2 (type: char(32)), c3 (type: varchar(15)), c4 (type: varchar(18)), c5 (type: decimal(10,2)), c6 (type: decimal(25,15)), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 13 Data size: 10725 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 13 Data size: 9347 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 10725 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 13 Data size: 9347 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat http://git-wip-us.apache.org/repos/asf/hive/blob/3bbc24d2/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table.q.out index c97dd65..c4382dd 100644 --- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table.q.out +++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_nonvec_table.q.out @@ -79,14 +79,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: table_add_int_permute_select - Statistics: Num rows: 6 Data size: 1152 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE Select Operator 
expressions: insert_num (type: int), a (type: int), b (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 1152 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 1152 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -216,14 +216,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: table_add_int_string_permute_select - Statistics: Num rows: 6 Data size: 1152 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: insert_num (type: int), a (type: int), b (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 1152 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 1152 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -415,14 +415,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: table_change_string_group_double - Statistics: Num rows: 6 Data size: 1272 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: insert_num (type: int), c1 (type: double), c2 (type: double), c3 (type: double), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 6 Data size: 1272 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 1272 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -683,14 +683,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: table_change_numeric_group_string_group_multi_ints_string_group - Statistics: Num rows: 6 Data size: 16248 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 15696 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: insert_num (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: string), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(50)), c8 (type: char(50)), c9 (type: char(5)), c10 (type: char(5)), c11 (type: char(5)), c12 (type: char(5)), c13 (type: varchar(50)), c14 (type: varchar(50)), c15 (type: varchar(50)), c16 (type: varchar(50)), c17 (type: varchar(5)), c18 (type: varchar(5)), c19 (type: varchar(5)), c20 (type: varchar(5)), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21 - Statistics: Num rows: 6 Data size: 
16248 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 15696 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 16248 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 15696 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -862,14 +862,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: table_change_numeric_group_string_group_floating_string_group - Statistics: Num rows: 6 Data size: 12540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: insert_num (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16 - Statistics: Num rows: 6 Data size: 12540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 12540 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 10884 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat http://git-wip-us.apache.org/repos/asf/hive/blob/3bbc24d2/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out index c752246..4aa04ed 100644 --- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out +++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out @@ -77,7 +77,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_add_int_permute_select - Statistics: Num rows: 2 Data size: 586 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE TableScan Vectorization: native: true vectorizationSchemaColumns: [0:insert_num:int, 1:a:int, 2:b:string, 3:c:int, 4:part:int, 5:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>] @@ -88,13 +88,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 4, 1, 2] - Statistics: Num rows: 2 Data size: 392 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 2 Data size: 392 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -232,7 +232,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_add_int_string_permute_select - Statistics: Num rows: 2 Data size: 674 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE TableScan Vectorization: native: true vectorizationSchemaColumns: [0:insert_num:int, 1:a:int, 2:b:string, 3:c:int, 4:d:string, 5:part:int, 6:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>] @@ -243,13 +243,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 5, 1, 2] - Statistics: Num rows: 2 Data size: 392 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 2 Data size: 392 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 2 Data size: 198 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -445,7 +445,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_string_group_double - Statistics: Num rows: 5 Data size: 3190 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 500 Basic stats: COMPLETE Column stats: PARTIAL TableScan Vectorization: native: true vectorizationSchemaColumns: [0:insert_num:int, 1:c1:double, 2:c2:double, 3:c3:double, 4:b:string, 5:part:int, 6:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>] @@ -456,13 +456,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 5, 1, 2, 3, 4] - Statistics: Num rows: 5 Data size: 1080 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 500 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 5 Data size: 1080 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 500 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -814,7 +814,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_numeric_group_string_group_multi_ints_string_group - Statistics: Num rows: 6 Data size: 19151 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1140 Basic stats: COMPLETE Column stats: COMPLETE TableScan Vectorization: native: true vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:string, 5:c5:char(50), 6:c6:char(50), 7:c7:char(50), 8:c8:char(50), 9:c9:char(5), 10:c10:char(5), 11:c11:char(5), 12:c12:char(5), 13:c13:varchar(50), 14:c14:varchar(50), 15:c15:varchar(50), 16:c16:varchar(50), 17:c17:varchar(5), 18:c18:varchar(5), 19:c19:varchar(5), 20:c20:varchar(5), 21:b:string, 22:part:int, 23:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>] @@ -825,13 +825,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 22, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] - Statistics: Num rows: 6 Data size: 16272 Basic stats: COMPLETE Column stats: PARTIAL 
+ Statistics: Num rows: 6 Data size: 1140 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 6 Data size: 16272 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1140 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1023,7 +1023,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_numeric_group_string_group_floating_string_group - Statistics: Num rows: 6 Data size: 17080 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL TableScan Vectorization: native: true vectorizationSchemaColumns: [0:insert_num:int, 1:c1:string, 2:c2:string, 3:c3:string, 4:c4:char(50), 5:c5:char(50), 6:c6:char(50), 7:c7:char(7), 8:c8:char(7), 9:c9:char(7), 10:c10:varchar(50), 11:c11:varchar(50), 12:c12:varchar(50), 13:c13:varchar(7), 14:c14:varchar(7), 15:c15:varchar(7), 16:b:string, 17:part:int, 18:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>] @@ -1034,13 +1034,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 17, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] - Statistics: Num rows: 6 Data size: 12564 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 6 Data size: 12564 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 3300 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1218,7 +1218,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_string_group_string_group_string - Statistics: Num rows: 6 Data size: 15466 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 2712 Basic stats: COMPLETE Column stats: PARTIAL TableScan Vectorization: native: true vectorizationSchemaColumns: [0:insert_num:int, 1:c1:char(50), 2:c2:char(9), 3:c3:varchar(50), 4:c4:char(9), 5:c5:varchar(50), 6:c6:varchar(9), 7:c7:string, 8:c8:char(50), 9:c9:char(9), 10:c10:string, 11:b:string, 12:part:int, 13:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>] @@ -1229,13 +1229,13 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] - Statistics: Num rows: 6 Data size: 8808 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 2712 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 6 Data size: 8808 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 2712 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -1441,7 +1441,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part_change_lower_to_higher_numeric_group_tinyint_to_bigint - Statistics: Num rows: 6 Data size: 
5739 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
                  TableScan Vectorization:
                      native: true
                      vectorizationSchemaColumns: [0:insert_num:int, 1:c1:smallint, 2:c2:int, 3:c3:bigint, 4:c4:decimal(38,18), 5:c5:float, 6:c6:double, 7:c7:int, 8:c8:bigint, 9:c9:decimal(38,18), 10:c10:float, 11:c11:double, 12:c12:bigint, 13:c13:decimal(38,18), 14:c14:float, 15:c15:double, 16:c16:decimal(38,18), 17:c17:float, 18:c18:double, 19:b:string, 20:part:int, 21:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
@@ -1452,13 +1452,13 @@ STAGE PLANS:
                        className: VectorSelectOperator
                        native: true
                        projectedOutputColumnNums: [0, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
-                    Statistics: Num rows: 6 Data size: 4344 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
                    File Output Operator
                      compressed: false
                      File Sink Vectorization:
                          className: VectorFileSinkOperator
                          native: false
-                      Statistics: Num rows: 6 Data size: 4344 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 6 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
                      table:
                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1606,7 +1606,7 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: part_change_lower_to_higher_numeric_group_decimal_to_float
-                  Statistics: Num rows: 6 Data size: 2771 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
                  TableScan Vectorization:
                      native: true
                      vectorizationSchemaColumns: [0:insert_num:int, 1:c1:float, 2:c2:double, 3:c3:double, 4:b:string, 5:part:int, 6:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
@@ -1617,13 +1617,13 @@ STAGE PLANS:
                        className: VectorSelectOperator
                        native: true
                        projectedOutputColumnNums: [0, 5, 1, 2, 3, 4]
-                    Statistics: Num rows: 6 Data size: 1272 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
                    File Output Operator
                      compressed: false
                      File Sink Vectorization:
                          className: VectorFileSinkOperator
                          native: false
-                      Statistics: Num rows: 6 Data size: 1272 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 6 Data size: 648 Basic stats: COMPLETE Column stats: PARTIAL
                      table:
                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/3bbc24d2/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
index 163ed4b..861d7f4 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
@@ -457,7 +457,7 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: part_add_various_various_struct2
-                  Statistics: Num rows: 8 Data size: 26640 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 8 Data size: 800 Basic stats: COMPLETE Column stats: PARTIAL
                  TableScan Vectorization:
                      native: true
                      vectorizationSchemaColumns: [0:insert_num:int, 1:b:string, 2:s2:struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>, 3:part:int, 4:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
@@ -468,13 +468,13 @@ STAGE PLANS:
                        className: VectorSelectOperator
                        native: true
                        projectedOutputColumnNums: [0, 3, 1, 2]
-                    Statistics: Num rows: 8 Data size: 21760 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 8 Data size: 800 Basic stats: COMPLETE Column stats: PARTIAL
                    File Output Operator
                      compressed: false
                      File Sink Vectorization:
                          className: VectorFileSinkOperator
                          native: false
-                      Statistics: Num rows: 8 Data size: 21760 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 8 Data size: 800 Basic stats: COMPLETE Column stats: PARTIAL
                      table:
                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/3bbc24d2/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
index b9bdb0a..ca9066e 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
@@ -266,7 +266,7 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: part_change_various_various_boolean_to_bigint
-                  Statistics: Num rows: 10 Data size: 16128 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
                  TableScan Vectorization:
                      native: true
                      vectorizationSchemaColumns: [0:insert_num:int, 1:c1:boolean, 2:c2:boolean, 3:c3:boolean, 4:c4:boolean, 5:c5:boolean, 6:c6:boolean, 7:c7:boolean, 8:c8:boolean, 9:c9:boolean, 10:c10:tinyint, 11:c11:tinyint, 12:c12:tinyint, 13:c13:tinyint, 14:c14:tinyint, 15:c15:tinyint, 16:c16:tinyint, 17:c17:tinyint, 18:c18:tinyint, 19:c19:tinyint, 20:c20:tinyint, 21:c21:smallint, 22:c22:smallint, 23:c23:smallint, 24:c24:smallint, 25:c25:smallint, 26:c26:smallint, 27:c27:smallint, 28:c28:smallint, 29:c29:smallint, 30:c30:smallint, 31:c31:smallint, 32:c32:int, 33:c33:int, 34:c34:int, 35:c35:int, 36:c36:int, 37:c37:int, 38:c38:int, 39:c39:int, 40:c40:int, 41:c41:int, 42:c42:int, 43:c43:bigint, 44:c44:bigint, 45:c45:bigint, 46:c46:bigint, 47:c47:bigint, 48:c48:bigint, 49:c49:bigint, 50:c50:bigint, 51:c51:bigint, 52:c52:bigint, 53:c53:bigint, 54:b:string, 55:part:int, 56:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
@@ -277,13 +277,13 @@ STAGE PLANS:
                        className: VectorSelectOperator
                        native: true
                        projectedOutputColumnNums: [0, 55, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]
-                    Statistics: Num rows: 10 Data size: 4480 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
                    File Output Operator
                      compressed: false
                      File Sink Vectorization:
                          className: VectorFileSinkOperator
                          native: false
-                      Statistics: Num rows: 10 Data size: 4480 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 10 Data size: 1228 Basic stats: COMPLETE Column stats: PARTIAL
                      table:
                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -517,7 +517,7 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: part_change_various_various_decimal_to_double
-                  Statistics: Num rows: 6 Data size: 17607 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
                  TableScan Vectorization:
                      native: true
                      vectorizationSchemaColumns: [0:insert_num:int, 1:c1:decimal(38,18), 2:c2:decimal(38,18), 3:c3:decimal(38,18), 4:c4:decimal(38,18), 5:c5:decimal(38,18), 6:c6:decimal(38,18), 7:c7:decimal(38,18), 8:c8:decimal(38,18), 9:c9:decimal(38,18), 10:c10:decimal(38,18), 11:c11:decimal(38,18), 12:c12:float, 13:c13:float, 14:c14:float, 15:c15:float, 16:c16:float, 17:c17:float, 18:c18:float, 19:c19:float, 20:c20:float, 21:c21:float, 22:c22:float, 23:c23:double, 24:c24:double, 25:c25:double, 26:c26:double, 27:c27:double, 28:c28:double, 29:c29:double, 30:c30:double, 31:c31:double, 32:c32:double, 33:c33:double, 34:b:string, 35:part:int, 36:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
@@ -528,13 +528,13 @@ STAGE PLANS:
                        className: VectorSelectOperator
                        native: true
                        projectedOutputColumnNums: [0, 35, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]
-                    Statistics: Num rows: 6 Data size: 9336 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
                    File Output Operator
                      compressed: false
                      File Sink Vectorization:
                          className: VectorFileSinkOperator
                          native: false
-                      Statistics: Num rows: 6 Data size: 9336 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
                      table:
                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -847,7 +847,7 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: part_change_various_various_date
-                  Statistics: Num rows: 6 Data size: 4916 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 6 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE
                  TableScan Vectorization:
                      native: true
                      vectorizationSchemaColumns: [0:insert_num:int, 1:c1:date, 2:c2:date, 3:c3:date, 4:c4:date, 5:b:string, 6:part:int, 7:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
@@ -858,13 +858,13 @@ STAGE PLANS:
                        className: VectorSelectOperator
                        native: true
                        projectedOutputColumnNums: [0, 6, 1, 2, 3, 4, 5]
-                    Statistics: Num rows: 6 Data size: 2496 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 6 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE
                    File Output Operator
                      compressed: false
                      File Sink Vectorization:
                          className: VectorFileSinkOperator
                          native: false
-                      Statistics: Num rows: 6 Data size: 2496 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 6 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE
                      table:
                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1085,7 +1085,7 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: part_change_same_type_different_params
-                  Statistics: Num rows: 13 Data size: 19409 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 13 Data size: 9347 Basic stats: COMPLETE Column stats: PARTIAL
                  TableScan Vectorization:
                      native: true
                      vectorizationSchemaColumns: [0:insert_num:int, 1:c1:char(8), 2:c2:char(32), 3:c3:varchar(15), 4:c4:varchar(18), 5:c5:decimal(10,2), 6:c6:decimal(25,15), 7:b:string, 8:part:int, 9:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
@@ -1096,13 +1096,13 @@ STAGE PLANS:
                        className: VectorSelectOperator
                        native: true
                        projectedOutputColumnNums: [0, 8, 1, 2, 3, 4, 5, 6, 7]
-                    Statistics: Num rows: 13 Data size: 10725 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 13 Data size: 9347 Basic stats: COMPLETE Column stats: PARTIAL
                    File Output Operator
                      compressed: false
                      File Sink Vectorization:
                          className: VectorFileSinkOperator
                          native: false
-                      Statistics: Num rows: 13 Data size: 10725 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 13 Data size: 9347 Basic stats: COMPLETE Column stats: PARTIAL
                      table:
                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat