http://git-wip-us.apache.org/repos/asf/hive/blob/38ad7792/ql/src/test/results/clientpositive/avro_timestamp_win.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_timestamp_win.q.java1.8.out b/ql/src/test/results/clientpositive/avro_timestamp_win.q.java1.8.out
deleted file mode 100755
index 087d571..0000000
--- a/ql/src/test/results/clientpositive/avro_timestamp_win.q.java1.8.out
+++ /dev/null
@@ -1,134 +0,0 @@
-PREHOOK: query: -- Windows-specific test due to space character being escaped in Hive paths on Windows.
--- INCLUDE_OS_WINDOWS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE avro_timestamp_staging
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- Windows-specific test due to space character being escaped in Hive paths on Windows.
--- INCLUDE_OS_WINDOWS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE avro_timestamp_staging
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE avro_timestamp
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE avro_timestamp
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE avro_timestamp_casts
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE avro_timestamp_casts
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE avro_timestamp_staging (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
-   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-   STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_timestamp_staging
-POSTHOOK: query: CREATE TABLE avro_timestamp_staging (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
-   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-   STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_timestamp_staging
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_timestamp.txt' OVERWRITE INTO TABLE avro_timestamp_staging
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@avro_timestamp_staging
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_timestamp.txt' OVERWRITE INTO TABLE avro_timestamp_staging
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@avro_timestamp_staging
-PREHOOK: query: CREATE TABLE avro_timestamp (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
-  PARTITIONED BY (p1 int, p2 timestamp)
-  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-  STORED AS AVRO
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_timestamp
-POSTHOOK: query: CREATE TABLE avro_timestamp (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
-  PARTITIONED BY (p1 int, p2 timestamp)
-  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-  STORED AS AVRO
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_timestamp
-PREHOOK: query: INSERT OVERWRITE TABLE avro_timestamp PARTITION(p1=2, p2='2014-09-26 07:08:09.123') SELECT * FROM avro_timestamp_staging
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp_staging
-PREHOOK: Output: default@avro_timestamp@p1=2/p2=2014-09-26%2007%3A08%3A09.123
-POSTHOOK: query: INSERT OVERWRITE TABLE avro_timestamp PARTITION(p1=2, p2='2014-09-26 07:08:09.123') SELECT * FROM avro_timestamp_staging
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp_staging
-POSTHOOK: Output: default@avro_timestamp@p1=2/p2=2014-09-26%2007%3A08%3A09.123
-POSTHOOK: Lineage: avro_timestamp PARTITION(p1=2,p2=2014-09-26 07:08:09.123).d SIMPLE [(avro_timestamp_staging)avro_timestamp_staging.FieldSchema(name:d, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: avro_timestamp PARTITION(p1=2,p2=2014-09-26 07:08:09.123).l1 SIMPLE [(avro_timestamp_staging)avro_timestamp_staging.FieldSchema(name:l1, type:array<timestamp>, comment:null), ]
-POSTHOOK: Lineage: avro_timestamp PARTITION(p1=2,p2=2014-09-26 07:08:09.123).m1 SIMPLE [(avro_timestamp_staging)avro_timestamp_staging.FieldSchema(name:m1, type:map<string,timestamp>, comment:null), ]
-PREHOOK: query: SELECT * FROM avro_timestamp
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26%2007%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_timestamp
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26%2007%3A08%3A09.123
-#### A masked pattern was here ####
-2012-02-21 07:08:09.123        {"bar":"1998-05-07 07:08:09.123","foo":"1980-12-16 07:08:09.123"}       ["2011-09-04 07:08:09.123","2011-09-05 07:08:09.123"]   2       2014-09-26 07:08:09.123
-2014-02-11 07:08:09.123        {"baz":"1981-12-16 07:08:09.123"}       ["2011-09-05 07:08:09.123"]     2       2014-09-26 07:08:09.123
-1947-02-11 07:08:09.123        {"baz":"1921-12-16 07:08:09.123"}       ["2011-09-05 07:08:09.123"]     2       2014-09-26 07:08:09.123
-8200-02-11 07:08:09.123        {"baz":"6981-12-16 07:08:09.123"}       ["1039-09-05 07:08:09.123"]     2       2014-09-26 07:08:09.123
-PREHOOK: query: SELECT d, COUNT(d) FROM avro_timestamp GROUP BY d
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26%2007%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT d, COUNT(d) FROM avro_timestamp GROUP BY d
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26%2007%3A08%3A09.123
-#### A masked pattern was here ####
-1947-02-11 07:08:09.123        1
-2012-02-21 07:08:09.123        1
-2014-02-11 07:08:09.123        1
-8200-02-11 07:08:09.123        1
-PREHOOK: query: SELECT * FROM avro_timestamp WHERE d!='1947-02-11 07:08:09.123'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26%2007%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_timestamp WHERE d!='1947-02-11 07:08:09.123'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26%2007%3A08%3A09.123
-#### A masked pattern was here ####
-2012-02-21 07:08:09.123        {"bar":"1998-05-07 07:08:09.123","foo":"1980-12-16 07:08:09.123"}       ["2011-09-04 07:08:09.123","2011-09-05 07:08:09.123"]   2       2014-09-26 07:08:09.123
-2014-02-11 07:08:09.123        {"baz":"1981-12-16 07:08:09.123"}       ["2011-09-05 07:08:09.123"]     2       2014-09-26 07:08:09.123
-8200-02-11 07:08:09.123        {"baz":"6981-12-16 07:08:09.123"}       ["1039-09-05 07:08:09.123"]     2       2014-09-26 07:08:09.123
-PREHOOK: query: SELECT * FROM avro_timestamp WHERE d<'2014-12-21 07:08:09.123'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26%2007%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_timestamp WHERE d<'2014-12-21 07:08:09.123'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26%2007%3A08%3A09.123
-#### A masked pattern was here ####
-2012-02-21 07:08:09.123        {"bar":"1998-05-07 07:08:09.123","foo":"1980-12-16 07:08:09.123"}       ["2011-09-04 07:08:09.123","2011-09-05 07:08:09.123"]   2       2014-09-26 07:08:09.123
-2014-02-11 07:08:09.123        {"baz":"1981-12-16 07:08:09.123"}       ["2011-09-05 07:08:09.123"]     2       2014-09-26 07:08:09.123
-1947-02-11 07:08:09.123        {"baz":"1921-12-16 07:08:09.123"}       ["2011-09-05 07:08:09.123"]     2       2014-09-26 07:08:09.123
-PREHOOK: query: SELECT * FROM avro_timestamp WHERE d>'8000-12-01 07:08:09.123'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26%2007%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_timestamp WHERE d>'8000-12-01 07:08:09.123'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26%2007%3A08%3A09.123
-#### A masked pattern was here ####
-8200-02-11 07:08:09.123        {"baz":"6981-12-16 07:08:09.123"}       ["1039-09-05 07:08:09.123"]     2       2014-09-26 07:08:09.123

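The escaped partition directories in the output above (e.g. p2=2014-09-26%2007%3A08%3A09.123) come from Hive percent-encoding characters that are unsafe in file paths, which is why these golden files were Windows-specific in the first place. Below is a minimal Java sketch of that encoding; the class name PartitionPathEscaper and the exact set of unsafe characters are illustrative assumptions, not a verbatim copy of Hive's org.apache.hadoop.hive.common.FileUtils.escapePathName.

import java.util.BitSet;

// Sketch: percent-encode a partition value the way the golden file above shows
// ("2014-09-26 07:08:09.123" -> "2014-09-26%2007%3A08%3A09.123").
// The unsafe-character set below is an assumption, not Hive's verbatim list.
public final class PartitionPathEscaper {
    private static final BitSet UNSAFE = new BitSet(128);
    static {
        // Characters special in paths/URIs or in key=value partition specs.
        for (char c : " :=%\"#'\\*/?[]^{}|".toCharArray()) {
            UNSAFE.set(c);
        }
        for (char c = 0; c < ' '; c++) {
            UNSAFE.set(c); // control characters
        }
    }

    public static String escapePathName(String value) {
        StringBuilder sb = new StringBuilder();
        for (char c : value.toCharArray()) {
            if (c < 128 && UNSAFE.get(c)) {
                sb.append('%').append(String.format("%02X", (int) c));
            } else {
                sb.append(c);
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        // Reproduces the partition directory seen in the diff above:
        // p2=2014-09-26%2007%3A08%3A09.123
        System.out.println("p2=" + escapePathName("2014-09-26 07:08:09.123"));
    }
}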
http://git-wip-us.apache.org/repos/asf/hive/blob/38ad7792/ql/src/test/results/clientpositive/combine2_win.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/combine2_win.q.out b/ql/src/test/results/clientpositive/combine2_win.q.out
deleted file mode 100644
index b33f31b..0000000
--- a/ql/src/test/results/clientpositive/combine2_win.q.out
+++ /dev/null
@@ -1,767 +0,0 @@
-PREHOOK: query: -- INCLUDE_OS_WINDOWS
--- included only on  windows because of difference in file name encoding logic
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
-
-create table combine2(key string) partitioned by (value string)
-PREHOOK: type: CREATETABLE
-POSTHOOK: query: -- INCLUDE_OS_WINDOWS
--- included only on  windows because of difference in file name encoding logic
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
-
-create table combine2(key string) partitioned by (value string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: default@combine2
-PREHOOK: query: insert overwrite table combine2 partition(value) 
-select * from (
-   select key, value from src where key < 10
-   union all 
-   select key, '|' as value from src where key = 11
-   union all
-   select key, '2010-04-21 09:45:00' value from src where key = 19) s
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@combine2
-POSTHOOK: query: insert overwrite table combine2 partition(value) 
-select * from (
-   select key, value from src where key < 10
-   union all 
-   select key, '|' as value from src where key = 11
-   union all
-   select key, '2010-04-21 09:45:00' value from src where key = 19) s
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@combine2@value=%7C
-POSTHOOK: Output: default@combine2@value=2010-04-21%2009%3A45%3A00
-POSTHOOK: Output: default@combine2@value=val_0
-POSTHOOK: Output: default@combine2@value=val_2
-POSTHOOK: Output: default@combine2@value=val_4
-POSTHOOK: Output: default@combine2@value=val_5
-POSTHOOK: Output: default@combine2@value=val_8
-POSTHOOK: Output: default@combine2@value=val_9
-POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: show partitions combine2
-PREHOOK: type: SHOWPARTITIONS
-POSTHOOK: query: show partitions combine2
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-value=%7C
-value=2010-04-21%2009%3A45%3A00
-value=val_0
-value=val_2
-value=val_4
-value=val_5
-value=val_8
-value=val_9
-PREHOOK: query: explain
-select key, value from combine2 where value is not null order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-select key, value from combine2 where value is not null order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME combine2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (TOK_FUNCTION TOK_ISNOTNULL (TOK_TABLE_OR_COL value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Alias -> Map Operator Tree:
-        combine2 
-          TableScan
-            alias: combine2
-            Select Operator
-              expressions:
-                    expr: key
-                    type: string
-                    expr: value
-                    type: string
-              outputColumnNames: _col0, _col1
-              Reduce Output Operator
-                key expressions:
-                      expr: _col0
-                      type: string
-                sort order: +
-                tag: -1
-                value expressions:
-                      expr: _col0
-                      type: string
-                      expr: _col1
-                      type: string
-      Reduce Operator Tree:
-        Extract
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-
-
-PREHOOK: query: select key, value from combine2 where value is not null order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@combine2
-PREHOOK: Input: default@combine2@value=%7C
-PREHOOK: Input: default@combine2@value=2010-04-21%2009%3A45%3A00
-PREHOOK: Input: default@combine2@value=val_0
-PREHOOK: Input: default@combine2@value=val_2
-PREHOOK: Input: default@combine2@value=val_4
-PREHOOK: Input: default@combine2@value=val_5
-PREHOOK: Input: default@combine2@value=val_8
-PREHOOK: Input: default@combine2@value=val_9
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value from combine2 where value is not null order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@combine2
-POSTHOOK: Input: default@combine2@value=%7C
-POSTHOOK: Input: default@combine2@value=2010-04-21%2009%3A45%3A00
-POSTHOOK: Input: default@combine2@value=val_0
-POSTHOOK: Input: default@combine2@value=val_2
-POSTHOOK: Input: default@combine2@value=val_4
-POSTHOOK: Input: default@combine2@value=val_5
-POSTHOOK: Input: default@combine2@value=val_8
-POSTHOOK: Input: default@combine2@value=val_9
-#### A masked pattern was here ####
-POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-0      val_0
-0      val_0
-0      val_0
-11     |
-19     2010-04-21 09:45:00
-2      val_2
-4      val_4
-5      val_5
-5      val_5
-5      val_5
-8      val_8
-9      val_9
-PREHOOK: query: explain extended
-select count(1) from combine2 where value is not null
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select count(1) from combine2 where value is not null
-POSTHOOK: type: QUERY
-POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME combine2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (TOK_FUNCTION TOK_ISNOTNULL (TOK_TABLE_OR_COL value)))))
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Alias -> Map Operator Tree:
-        combine2 
-          TableScan
-            alias: combine2
-            GatherStats: false
-            Select Operator
-              Group By Operator
-                aggregations:
-                      expr: count(1)
-                bucketGroup: false
-                mode: hash
-                outputColumnNames: _col0
-                Reduce Output Operator
-                  sort order: 
-                  tag: -1
-                  value expressions:
-                        expr: _col0
-                        type: bigint
-      Needs Tagging: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: value=%7C
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              value |
-            properties:
-              bucket_count -1
-              columns key
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              numFiles 1
-              numRows 1
-              partition_columns value
-              rawDataSize 2
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 3
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key
-                columns.types string
-#### A masked pattern was here ####
-                name default.combine2
-                numFiles 8
-                numPartitions 8
-                numRows 12
-                partition_columns value
-                rawDataSize 14
-                serialization.ddl struct combine2 { string key}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 26
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.combine2
-            name: default.combine2
-#### A masked pattern was here ####
-          Partition
-            base file name: value=2010-04-21%2009%3A45%3A00
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              value 2010-04-21 09:45:00
-            properties:
-              bucket_count -1
-              columns key
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              numFiles 1
-              numRows 1
-              partition_columns value
-              rawDataSize 2
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 3
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key
-                columns.types string
-#### A masked pattern was here ####
-                name default.combine2
-                numFiles 8
-                numPartitions 8
-                numRows 12
-                partition_columns value
-                rawDataSize 14
-                serialization.ddl struct combine2 { string key}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 26
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.combine2
-            name: default.combine2
-#### A masked pattern was here ####
-          Partition
-            base file name: value=val_0
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              value val_0
-            properties:
-              bucket_count -1
-              columns key
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              numFiles 1
-              numRows 3
-              partition_columns value
-              rawDataSize 3
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 6
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key
-                columns.types string
-#### A masked pattern was here ####
-                name default.combine2
-                numFiles 8
-                numPartitions 8
-                numRows 12
-                partition_columns value
-                rawDataSize 14
-                serialization.ddl struct combine2 { string key}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 26
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.combine2
-            name: default.combine2
-#### A masked pattern was here ####
-          Partition
-            base file name: value=val_2
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              value val_2
-            properties:
-              bucket_count -1
-              columns key
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              numFiles 1
-              numRows 1
-              partition_columns value
-              rawDataSize 1
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 2
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key
-                columns.types string
-#### A masked pattern was here ####
-                name default.combine2
-                numFiles 8
-                numPartitions 8
-                numRows 12
-                partition_columns value
-                rawDataSize 14
-                serialization.ddl struct combine2 { string key}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 26
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.combine2
-            name: default.combine2
-#### A masked pattern was here ####
-          Partition
-            base file name: value=val_4
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              value val_4
-            properties:
-              bucket_count -1
-              columns key
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              numFiles 1
-              numRows 1
-              partition_columns value
-              rawDataSize 1
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 2
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key
-                columns.types string
-#### A masked pattern was here ####
-                name default.combine2
-                numFiles 8
-                numPartitions 8
-                numRows 12
-                partition_columns value
-                rawDataSize 14
-                serialization.ddl struct combine2 { string key}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 26
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.combine2
-            name: default.combine2
-#### A masked pattern was here ####
-          Partition
-            base file name: value=val_5
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              value val_5
-            properties:
-              bucket_count -1
-              columns key
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              numFiles 1
-              numRows 3
-              partition_columns value
-              rawDataSize 3
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 6
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key
-                columns.types string
-#### A masked pattern was here ####
-                name default.combine2
-                numFiles 8
-                numPartitions 8
-                numRows 12
-                partition_columns value
-                rawDataSize 14
-                serialization.ddl struct combine2 { string key}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 26
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.combine2
-            name: default.combine2
-#### A masked pattern was here ####
-          Partition
-            base file name: value=val_8
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              value val_8
-            properties:
-              bucket_count -1
-              columns key
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              numFiles 1
-              numRows 1
-              partition_columns value
-              rawDataSize 1
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 2
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key
-                columns.types string
-#### A masked pattern was here ####
-                name default.combine2
-                numFiles 8
-                numPartitions 8
-                numRows 12
-                partition_columns value
-                rawDataSize 14
-                serialization.ddl struct combine2 { string key}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 26
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.combine2
-            name: default.combine2
-#### A masked pattern was here ####
-          Partition
-            base file name: value=val_9
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              value val_9
-            properties:
-              bucket_count -1
-              columns key
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              numFiles 1
-              numRows 1
-              partition_columns value
-              rawDataSize 1
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 2
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key
-                columns.types string
-#### A masked pattern was here ####
-                name default.combine2
-                numFiles 8
-                numPartitions 8
-                numRows 12
-                partition_columns value
-                rawDataSize 14
-                serialization.ddl struct combine2 { string key}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 26
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.combine2
-            name: default.combine2
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations:
-                expr: count(VALUE._col0)
-          bucketGroup: false
-          mode: mergepartial
-          outputColumnNames: _col0
-          Select Operator
-            expressions:
-                  expr: _col0
-                  type: bigint
-            outputColumnNames: _col0
-            File Output Operator
-              compressed: false
-              GlobalTableId: 0
-#### A masked pattern was here ####
-              NumFilesPerFileSink: 1
-#### A masked pattern was here ####
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    columns _col0
-                    columns.types bigint
-                    escape.delim \
-                    hive.serialization.extend.additional.nesting.levels true
-                    serialization.format 1
-              TotalFiles: 1
-              GatherStats: false
-              MultiFileSpray: false
-      Truncated Path -> Alias:
-        /combine2/value=%7C [combine2]
-        /combine2/value=2010-04-21%2009%3A45%3A00 [combine2]
-        /combine2/value=val_0 [combine2]
-        /combine2/value=val_2 [combine2]
-        /combine2/value=val_4 [combine2]
-        /combine2/value=val_5 [combine2]
-        /combine2/value=val_8 [combine2]
-        /combine2/value=val_9 [combine2]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-
-
-PREHOOK: query: select count(1) from combine2 where value is not null
-PREHOOK: type: QUERY
-PREHOOK: Input: default@combine2
-PREHOOK: Input: default@combine2@value=%7C
-PREHOOK: Input: default@combine2@value=2010-04-21%2009%3A45%3A00
-PREHOOK: Input: default@combine2@value=val_0
-PREHOOK: Input: default@combine2@value=val_2
-PREHOOK: Input: default@combine2@value=val_4
-PREHOOK: Input: default@combine2@value=val_5
-PREHOOK: Input: default@combine2@value=val_8
-PREHOOK: Input: default@combine2@value=val_9
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from combine2 where value is not null
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@combine2
-POSTHOOK: Input: default@combine2@value=%7C
-POSTHOOK: Input: default@combine2@value=2010-04-21%2009%3A45%3A00
-POSTHOOK: Input: default@combine2@value=val_0
-POSTHOOK: Input: default@combine2@value=val_2
-POSTHOOK: Input: default@combine2@value=val_4
-POSTHOOK: Input: default@combine2@value=val_5
-POSTHOOK: Input: default@combine2@value=val_8
-POSTHOOK: Input: default@combine2@value=val_9
-#### A masked pattern was here ####
-POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-12
-PREHOOK: query: explain
-select ds, count(1) from srcpart where ds is not null group by ds
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-select ds, count(1) from srcpart where ds is not null group by ds
-POSTHOOK: type: QUERY
-POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-ABSTRACT SYNTAX TREE:
-  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL ds)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (TOK_FUNCTION TOK_ISNOTNULL (TOK_TABLE_OR_COL ds))) (TOK_GROUPBY (TOK_TABLE_OR_COL ds))))
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Alias -> Map Operator Tree:
-        srcpart 
-          TableScan
-            alias: srcpart
-            Select Operator
-              expressions:
-                    expr: ds
-                    type: string
-              outputColumnNames: ds
-              Group By Operator
-                aggregations:
-                      expr: count(1)
-                bucketGroup: false
-                keys:
-                      expr: ds
-                      type: string
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Reduce Output Operator
-                  key expressions:
-                        expr: _col0
-                        type: string
-                  sort order: +
-                  Map-reduce partition columns:
-                        expr: _col0
-                        type: string
-                  tag: -1
-                  value expressions:
-                        expr: _col1
-                        type: bigint
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations:
-                expr: count(VALUE._col0)
-          bucketGroup: false
-          keys:
-                expr: KEY._col0
-                type: string
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Select Operator
-            expressions:
-                  expr: _col0
-                  type: string
-                  expr: _col1
-                  type: bigint
-            outputColumnNames: _col0, _col1
-            File Output Operator
-              compressed: false
-              GlobalTableId: 0
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-
-
-PREHOOK: query: select ds, count(1) from srcpart where ds is not null group by ds
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select ds, count(1) from srcpart where ds is not null group by ds
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: Lineage: combine2 PARTITION(value=2010-04-21 09:45:00).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=val_9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: combine2 PARTITION(value=|).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:key, type:string, comment:default), ]
-2008-04-08     1000
-2008-04-09     1000

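Going the other direction, Hive decodes the escaped directory names back to raw partition values, which is why "show partitions" above prints value=%7C while the Lineage lines print value=|. A simplified Java stand-in for that decoding (Hive's own version lives in org.apache.hadoop.hive.common.FileUtils; the class name and the minimal error handling here are assumptions):

// Sketch: decode an escaped partition directory name back to its raw value,
// e.g. "%7C" -> "|" and "2010-04-21%2009%3A45%3A00" -> "2010-04-21 09:45:00".
// Malformed escapes are passed through unchanged for simplicity.
public final class PartitionPathUnescaper {
    public static String unescapePathName(String escaped) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < escaped.length(); i++) {
            char c = escaped.charAt(i);
            if (c == '%' && i + 2 < escaped.length()) {
                sb.append((char) Integer.parseInt(escaped.substring(i + 1, i + 3), 16));
                i += 2;
            } else {
                sb.append(c);
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(unescapePathName("2010-04-21%2009%3A45%3A00")); // 2010-04-21 09:45:00
        System.out.println(unescapePathName("%7C"));                       // |
    }
}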
http://git-wip-us.apache.org/repos/asf/hive/blob/38ad7792/ql/src/test/results/clientpositive/input_part10_win.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_part10_win.q.out b/ql/src/test/results/clientpositive/input_part10_win.q.out
deleted file mode 100644
index 80da57c..0000000
--- a/ql/src/test/results/clientpositive/input_part10_win.q.out
+++ /dev/null
@@ -1,131 +0,0 @@
-PREHOOK: query: -- INCLUDE_OS_WINDOWS
--- included only on  windows because of difference in file name encoding logic
-
-CREATE TABLE part_special (
-  a STRING,
-  b STRING
-) PARTITIONED BY (
-  ds STRING,
-  ts STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part_special
-POSTHOOK: query: -- INCLUDE_OS_WINDOWS
--- included only on  windows because of difference in file name encoding logic
-
-CREATE TABLE part_special (
-  a STRING,
-  b STRING
-) PARTITIONED BY (
-  ds STRING,
-  ts STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part_special
-PREHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
-SELECT 1, 2 FROM src LIMIT 1
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
-INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
-SELECT 1, 2 FROM src LIMIT 1
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: 1 (type: int), 2 (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
-              Limit
-                Number of rows: 1
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: int), _col1 (type: int)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: int), VALUE._col1 (type: int)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-          Limit
-            Number of rows: 1
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.part_special
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008 04 08
-            ts 10:11:12=455
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.part_special
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-
-PREHOOK: query: INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
-SELECT 1, 2 FROM src LIMIT 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@part_special@ds=2008%2004%2008/ts=10%3A11%3A12%3D455
-POSTHOOK: query: INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
-SELECT 1, 2 FROM src LIMIT 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@part_special@ds=2008%2004%2008/ts=10%3A11%3A12%3D455
-POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).a SIMPLE []
-POSTHOOK: Lineage: part_special PARTITION(ds=2008 04 08,ts=10:11:12=455).b SIMPLE []
-PREHOOK: query: DESCRIBE EXTENDED part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@part_special
-POSTHOOK: query: DESCRIBE EXTENDED part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@part_special
-a                      string                                      
-b                      string                                      
-ds                     string                                      
-ts                     string                                      
-                
-# Partition Information                 
-# col_name             data_type               comment             
-                
-ds                     string                                      
-ts                     string                                      
-                
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM part_special WHERE ds='2008 04 08' AND ts = '10:11:12=455'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part_special
-PREHOOK: Input: default@part_special@ds=2008%2004%2008/ts=10%3A11%3A12%3D455
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM part_special WHERE ds='2008 04 08' AND ts = '10:11:12=455'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part_special
-POSTHOOK: Input: default@part_special@ds=2008%2004%2008/ts=10%3A11%3A12%3D455
-#### A masked pattern was here ####
-1      2       2008 04 08      10:11:12=455
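
The deletions above capture the behavior this Windows-only test existed to pin down: when Hive turns a partition spec into a directory name, characters that are unsafe in a path are percent-escaped, which is how PARTITION(ds='2008 04 08', ts = '10:11:12=455') becomes ds=2008%2004%2008/ts=10%3A11%3A12%3D455 in the PREHOOK/POSTHOOK Output lines. A minimal standalone sketch of that kind of escaping follows; the unsafe-character set here is an assumption for the example, and Hive's real logic lives in org.apache.hadoop.hive.common.FileUtils, so treat this as an illustration rather than the actual implementation.

    // Illustrative sketch only: the UNSAFE set below is an assumption for
    // this example, not Hive's exact character list.
    public class PartitionPathEscaper {

        // Characters assumed unsafe in a partition directory name.
        private static final String UNSAFE = " :=%#\"<>[]{}|\\^";

        static String escape(String value) {
            StringBuilder sb = new StringBuilder();
            for (char c : value.toCharArray()) {
                if (UNSAFE.indexOf(c) >= 0 || c < 0x20) {
                    // Replace the unsafe character with %XX (uppercase hex).
                    sb.append('%').append(String.format("%02X", (int) c));
                } else {
                    sb.append(c);
                }
            }
            return sb.toString();
        }

        public static void main(String[] args) {
            // Matches the escaped path components in the output above.
            System.out.println("ds=" + escape("2008 04 08"));   // ds=2008%2004%2008
            System.out.println("ts=" + escape("10:11:12=455")); // ts=10%3A11%3A12%3D455
        }
    }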

http://git-wip-us.apache.org/repos/asf/hive/blob/38ad7792/ql/src/test/results/clientpositive/load_dyn_part14_win.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part14_win.q.out b/ql/src/test/results/clientpositive/load_dyn_part14_win.q.out
deleted file mode 100644
index d793e39..0000000
--- a/ql/src/test/results/clientpositive/load_dyn_part14_win.q.out
+++ /dev/null
@@ -1,298 +0,0 @@
-PREHOOK: query: -- INCLUDE_OS_WINDOWS
--- included only on  windows because of difference in file name encoding logic
-
--- SORT_QUERY_RESULTS
-
-create table if not exists nzhang_part14 (key string)
-  partitioned by (value string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@nzhang_part14
-POSTHOOK: query: -- INCLUDE_OS_WINDOWS
--- included only on  windows because of difference in file name encoding logic
-
--- SORT_QUERY_RESULTS
-
-create table if not exists nzhang_part14 (key string)
-  partitioned by (value string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@nzhang_part14
-PREHOOK: query: describe extended nzhang_part14
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@nzhang_part14
-POSTHOOK: query: describe extended nzhang_part14
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@nzhang_part14
-key                    string                                      
-value                  string                                      
-                
-# Partition Information                 
-# col_name             data_type               comment             
-                
-value                  string                                      
-                
-#### A masked pattern was here ####
-PREHOOK: query: explain
-insert overwrite table nzhang_part14 partition(value) 
-select key, value from (
-  select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a 
-  union all
-  select * from (select 'k2' as key, '' as value from src limit 2)b
-  union all 
-  select * from (select 'k3' as key, ' ' as value from src limit 2)c
-) T
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-insert overwrite table nzhang_part14 partition(value) 
-select key, value from (
-  select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a 
-  union all
-  select * from (select 'k2' as key, '' as value from src limit 2)b
-  union all 
-  select * from (select 'k3' as key, ' ' as value from src limit 2)c
-) T
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-9, Stage-10
-  Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6
-  Stage-5
-  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-3 depends on stages: Stage-0
-  Stage-4
-  Stage-6
-  Stage-7 depends on stages: Stage-6
-  Stage-9 is a root stage
-  Stage-10 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-              Limit
-                Number of rows: 2
-                Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-      Reduce Operator Tree:
-        Limit
-          Number of rows: 2
-          Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-          Select Operator
-            expressions: 'k1' (type: string), null (type: string)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Union
-              Statistics: Num rows: 6 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 6 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.nzhang_part14
-          TableScan
-            Union
-              Statistics: Num rows: 6 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 6 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.nzhang_part14
-          TableScan
-            Union
-              Statistics: Num rows: 6 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 6 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.nzhang_part14
-
-  Stage: Stage-8
-    Conditional Operator
-
-  Stage: Stage-5
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            value 
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.nzhang_part14
-
-  Stage: Stage-3
-    Stats-Aggr Operator
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.nzhang_part14
-
-  Stage: Stage-6
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.nzhang_part14
-
-  Stage: Stage-7
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-9
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-              Limit
-                Number of rows: 2
-                Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-      Reduce Operator Tree:
-        Limit
-          Number of rows: 2
-          Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-          Select Operator
-            expressions: 'k2' (type: string), '' (type: string)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-10
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-              Limit
-                Number of rows: 2
-                Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-      Reduce Operator Tree:
-        Limit
-          Number of rows: 2
-          Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-          Select Operator
-            expressions: 'k3' (type: string), ' ' (type: string)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-PREHOOK: query: insert overwrite table nzhang_part14 partition(value) 
-select key, value from (
-  select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a 
-  union all
-  select * from (select 'k2' as key, '' as value from src limit 2)b
-  union all 
-  select * from (select 'k3' as key, ' ' as value from src limit 2)c
-) T
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@nzhang_part14
-POSTHOOK: query: insert overwrite table nzhang_part14 partition(value) 
-select key, value from (
-  select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a 
-  union all
-  select * from (select 'k2' as key, '' as value from src limit 2)b
-  union all 
-  select * from (select 'k3' as key, ' ' as value from src limit 2)c
-) T
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@nzhang_part14@value=%2520
-POSTHOOK: Output: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__
-POSTHOOK: Lineage: nzhang_part14 PARTITION(value=%20).key EXPRESSION []
-POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION []
-PREHOOK: query: show partitions nzhang_part14
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@nzhang_part14
-POSTHOOK: query: show partitions nzhang_part14
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@nzhang_part14
-value=%2520
-value=__HIVE_DEFAULT_PARTITION__
-PREHOOK: query: select * from nzhang_part14 where value <> 'a'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@nzhang_part14
-PREHOOK: Input: default@nzhang_part14@value=%2520
-PREHOOK: Input: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__
-#### A masked pattern was here ####
-POSTHOOK: query: select * from nzhang_part14 where value <> 'a'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@nzhang_part14
-POSTHOOK: Input: default@nzhang_part14@value=%2520
-POSTHOOK: Input: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__
-#### A masked pattern was here ####
-k1     __HIVE_DEFAULT_PARTITION__
-k1     __HIVE_DEFAULT_PARTITION__
-k2     __HIVE_DEFAULT_PARTITION__
-k2     __HIVE_DEFAULT_PARTITION__
-k3     %20
-k3     %20
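
The tail of this second deleted file pins down the dynamic-partition edge cases the test covered: rows whose partition value is NULL (k1) or the empty string (k2) land in __HIVE_DEFAULT_PARTITION__, while the single-space value (k3) is stored escaped as %20, and its directory shows up as %2520 because the % of the already-escaped value is escaped once more when the path is built, the Windows encoding quirk this test was written for. A hedged sketch of that naming step, reusing the illustrative escape() helper from the earlier example (the constant below matches the stock value of hive.exec.default.partition.name):

    // Sketch of the dynamic-partition naming seen above; assumes the
    // illustrative PartitionPathEscaper.escape() from the previous example.
    public class DynamicPartitionNames {

        private static final String DEFAULT_PARTITION = "__HIVE_DEFAULT_PARTITION__";

        static String partitionDir(String column, String value) {
            // NULL and the empty string both land in the default partition,
            // as the k1 and k2 rows above show.
            if (value == null || value.isEmpty()) {
                return column + "=" + DEFAULT_PARTITION;
            }
            return column + "=" + PartitionPathEscaper.escape(value);
        }

        public static void main(String[] args) {
            System.out.println(partitionDir("value", null)); // value=__HIVE_DEFAULT_PARTITION__
            System.out.println(partitionDir("value", ""));   // value=__HIVE_DEFAULT_PARTITION__
            System.out.println(partitionDir("value", " "));  // value=%20
        }
    }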
