hive git commit: HIVE-19346 : TestMiniLlapLocalCliDriver.testCliDriver[materialized_view_create_rewrite_5] failing

2018-04-28 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master fc750ed38 -> 9caab816f


HIVE-19346 : TestMiniLlapLocalCliDriver.testCliDriver[materialized_view_create_rewrite_5] failing


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9caab816
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9caab816
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9caab816

Branch: refs/heads/master
Commit: 9caab816f2bf86f73015bff21c8ce932ac6cac99
Parents: fc750ed
Author: Ashutosh Chauhan 
Authored: Sat Apr 28 23:49:54 2018 -0700
Committer: Ashutosh Chauhan 
Committed: Sat Apr 28 23:50:29 2018 -0700

--
 .../clientpositive/llap/materialized_view_create_rewrite_5.q.out   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/9caab816/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
index d69f962..2ffc9df 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
@@ -399,7 +399,7 @@ Retention:  0
 Table Type:MATERIALIZED_VIEW
 Table Parameters:   
numFiles2   
-   totalSize   1055
+   totalSize   1053
transactional   true
transactional_propertiesdefault 
  A masked pattern was here 



[2/3] hive git commit: HIVE-19350: Vectorization: Turn off vectorization for explainuser_1.q / spark_explainuser_1 (Matt McCline, reviewed by Vineet Garg)

2018-04-28 Thread mmccline
http://git-wip-us.apache.org/repos/asf/hive/blob/af4c396e/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
index 9827d3b..872f9c9 100644
--- a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
@@ -119,11 +119,11 @@ Stage-3
   Number of rows:100
   Select Operator [SEL_4] (rows=100 width=178)
 Output:["_col0","_col1"]
-  <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized, llap
-PARTITION_ONLY_SHUFFLE [RS_10]
-  Limit [LIM_9] (rows=100 width=178)
+  <-Map 1 [CUSTOM_SIMPLE_EDGE] llap
+PARTITION_ONLY_SHUFFLE [RS_3]
+  Limit [LIM_2] (rows=100 width=178)
 Number of rows:100
-Select Operator [SEL_8] (rows=500 width=178)
+Select Operator [SEL_1] (rows=500 width=178)
   Output:["_col0","_col1"]
   TableScan [TS_0] (rows=500 width=178)
 
default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
@@ -158,15 +158,15 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 2 vectorized, llap
-  File Output Operator [FS_14]
-Group By Operator [GBY_13] (rows=1 width=16)
+  Reducer 2 llap
+  File Output Operator [FS_8]
+Group By Operator [GBY_6] (rows=1 width=16)
   
Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
-<-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized, llap
-  PARTITION_ONLY_SHUFFLE [RS_12]
-Group By Operator [GBY_11] (rows=1 width=16)
+<-Map 1 [CUSTOM_SIMPLE_EDGE] llap
+  PARTITION_ONLY_SHUFFLE [RS_5]
+Group By Operator [GBY_4] (rows=1 width=16)
   Output:["_col0","_col1"],aggregations:["sum(_col0)","sum(_col1)"]
-  Select Operator [SEL_10] (rows=500 width=95)
+  Select Operator [SEL_2] (rows=500 width=95)
 Output:["_col0","_col1"]
 TableScan [TS_0] (rows=500 width=95)
   
default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
@@ -192,15 +192,15 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 2 vectorized, llap
-  File Output Operator [FS_13]
-Group By Operator [GBY_12] (rows=1 width=8)
+  Reducer 2 llap
+  File Output Operator [FS_7]
+Group By Operator [GBY_5] (rows=1 width=8)
   Output:["_col0"],aggregations:["count(VALUE._col0)"]
-<-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized, llap
-  PARTITION_ONLY_SHUFFLE [RS_11]
-Group By Operator [GBY_10] (rows=1 width=8)
+<-Map 1 [CUSTOM_SIMPLE_EDGE] llap
+  PARTITION_ONLY_SHUFFLE [RS_4]
+Group By Operator [GBY_3] (rows=1 width=8)
   Output:["_col0"],aggregations:["count()"]
-  Select Operator [SEL_9] (rows=500 width=102)
+  Select Operator [SEL_2] (rows=500 width=102)
 TableScan [TS_0] (rows=500 width=102)
   
default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:COMPLETE
 
@@ -217,15 +217,15 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 2 vectorized, llap
-  File Output Operator [FS_14]
-Group By Operator [GBY_13] (rows=1 width=16)
+  Reducer 2 llap
+  File Output Operator [FS_8]
+Group By Operator [GBY_6] (rows=1 width=16)
   
Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
-<-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized, llap
-  PARTITION_ONLY_SHUFFLE [RS_12]
-Group By Operator [GBY_11] (rows=1 width=16)
+<-Map 1 [CUSTOM_SIMPLE_EDGE] llap
+  PARTITION_ONLY_SHUFFLE [RS_5]
+Group By Operator [GBY_4] (rows=1 width=16)
   Output:["_col0","_col1"],aggregations:["sum(_col0)","sum(_col1)"]
-  Select Operator [SEL_10] (rows=500 width=95)
+  Select Operator [SEL_2] (rows=500 width=95)
 Output:["_col0","_col1"]
 TableScan [TS_0] (rows=500 width=95)
   
default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
@@ -269,9 +269,9 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 3 vectorized, llap
-  File Output Operator [FS_26]
-Group By Operator [GBY_25] (rows=1 width=8)
+  Reducer 3 llap
+  File Output Operator [F

[1/3] hive git commit: HIVE-19350: Vectorization: Turn off vectorization for explainuser_1.q / spark_explainuser_1 (Matt McCline, reviewed by Vineet Garg)

2018-04-28 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/branch-3 5b4ae7417 -> af4c396e8


http://git-wip-us.apache.org/repos/asf/hive/blob/af4c396e/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
index ada2eb4..4a36dd4 100644
--- a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
@@ -50,10 +50,10 @@ Stage-2
   Move Operator
 table:{"name:":"default.src_orc_merge_test_part"}
 Stage-1
-  Map 1 vectorized
-  File Output Operator [FS_5]
+  Map 1
+  File Output Operator [FS_3]
 table:{"name:":"default.src_orc_merge_test_part"}
-Select Operator [SEL_4] (rows=500 width=10)
+Select Operator [SEL_1] (rows=500 width=10)
   Output:["_col0","_col1"]
   TableScan [TS_0] (rows=500 width=10)
 default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
@@ -83,20 +83,20 @@ Stage-2
   Move Operator
 table:{"name:":"default.src_orc_merge_test_part"}
 Stage-1
-  Reducer 2 vectorized
-  File Output Operator [FS_14]
+  Reducer 2
+  File Output Operator [FS_7]
 table:{"name:":"default.src_orc_merge_test_part"}
-Select Operator [SEL_13] (rows=100 width=10)
+Select Operator [SEL_6] (rows=100 width=10)
   Output:["_col0","_col1"]
-  Limit [LIM_12] (rows=100 width=10)
+  Limit [LIM_5] (rows=100 width=10)
 Number of rows:100
-Select Operator [SEL_11] (rows=100 width=10)
+Select Operator [SEL_4] (rows=100 width=10)
   Output:["_col0","_col1"]
-<-Map 1 [GROUP] vectorized
-  GROUP [RS_10]
-Limit [LIM_9] (rows=100 width=10)
+<-Map 1 [GROUP]
+  GROUP [RS_3]
+Limit [LIM_2] (rows=100 width=10)
   Number of rows:100
-  Select Operator [SEL_8] (rows=500 width=10)
+  Select Operator [SEL_1] (rows=500 width=10)
 Output:["_col0","_col1"]
 TableScan [TS_0] (rows=500 width=10)
   
default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
@@ -124,15 +124,15 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 2 vectorized
-  File Output Operator [FS_14]
-Group By Operator [GBY_13] (rows=1 width=16)
+  Reducer 2
+  File Output Operator [FS_8]
+Group By Operator [GBY_6] (rows=1 width=16)
   
Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
-<-Map 1 [GROUP] vectorized
-  GROUP [RS_12]
-Group By Operator [GBY_11] (rows=1 width=16)
+<-Map 1 [GROUP]
+  GROUP [RS_5]
+Group By Operator [GBY_4] (rows=1 width=16)
   Output:["_col0","_col1"],aggregations:["sum(_col0)","sum(_col1)"]
-  Select Operator [SEL_10] (rows=500 width=94)
+  Select Operator [SEL_2] (rows=500 width=94)
 Output:["_col0","_col1"]
 TableScan [TS_0] (rows=500 width=94)
   
default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
@@ -158,15 +158,15 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 2 vectorized
-  File Output Operator [FS_13]
-Group By Operator [GBY_12] (rows=1 width=8)
+  Reducer 2
+  File Output Operator [FS_7]
+Group By Operator [GBY_5] (rows=1 width=8)
   Output:["_col0"],aggregations:["count(VALUE._col0)"]
-<-Map 1 [GROUP] vectorized
-  GROUP [RS_11]
-Group By Operator [GBY_10] (rows=1 width=8)
+<-Map 1 [GROUP]
+  GROUP [RS_4]
+Group By Operator [GBY_3] (rows=1 width=8)
   Output:["_col0"],aggregations:["count()"]
-  Select Operator [SEL_9] (rows=500 width=94)
+  Select Operator [SEL_2] (rows=500 width=94)
 TableScan [TS_0] (rows=500 width=94)
   
default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:NONE
 
@@ -183,15 +183,15 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 2 vectorized
-  File Output Operator [FS_14]
-Group By Operator [GBY_13] (rows=1 width=16)
+  Reducer 2
+  File Output Operator [FS_8]
+Group By Operator [GBY_6] (rows=1 width=16)
   
Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
-<-Map 1 [GROUP] vectorized
-  GROUP [RS_12]
-  

[3/3] hive git commit: HIVE-19350: Vectorization: Turn off vectorization for explainuser_1.q / spark_explainuser_1 (Matt McCline, reviewed by Vineet Garg)

2018-04-28 Thread mmccline
HIVE-19350: Vectorization: Turn off vectorization for explainuser_1.q / spark_explainuser_1 (Matt McCline, reviewed by Vineet Garg)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/af4c396e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/af4c396e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/af4c396e

Branch: refs/heads/branch-3
Commit: af4c396e80959640f638f359ed6ee7a366f8d0b2
Parents: 5b4ae74
Author: Matt McCline 
Authored: Sun Apr 29 01:45:13 2018 -0500
Committer: Matt McCline 
Committed: Sun Apr 29 01:47:59 2018 -0500

--
 .../test/queries/clientpositive/explainuser_1.q |1 +
 .../clientpositive/spark_explainuser_1.q|1 +
 .../clientpositive/llap/explainuser_1.q.out | 2408 +-
 .../spark/spark_explainuser_1.q.out | 2288 -
 4 files changed, 2350 insertions(+), 2348 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/af4c396e/ql/src/test/queries/clientpositive/explainuser_1.q
--
diff --git a/ql/src/test/queries/clientpositive/explainuser_1.q b/ql/src/test/queries/clientpositive/explainuser_1.q
index a6fbb54..db4a536 100644
--- a/ql/src/test/queries/clientpositive/explainuser_1.q
+++ b/ql/src/test/queries/clientpositive/explainuser_1.q
@@ -1,3 +1,4 @@
+set hive.vectorized.execution.enabled=false;
 set hive.strict.checks.bucketing=false;
 
 set hive.mapred.mode=nonstrict;

http://git-wip-us.apache.org/repos/asf/hive/blob/af4c396e/ql/src/test/queries/clientpositive/spark_explainuser_1.q
--
diff --git a/ql/src/test/queries/clientpositive/spark_explainuser_1.q b/ql/src/test/queries/clientpositive/spark_explainuser_1.q
index 43252f0..28c80b7 100644
--- a/ql/src/test/queries/clientpositive/spark_explainuser_1.q
+++ b/ql/src/test/queries/clientpositive/spark_explainuser_1.q
@@ -1,3 +1,4 @@
+set hive.vectorized.execution.enabled=false;
 set hive.strict.checks.bucketing=false;
 
 set hive.mapred.mode=nonstrict;
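
For reference, the change itself is the single line "set hive.vectorized.execution.enabled=false;" added at the top of both .q files; disabling vectorization is what drops the "vectorized" annotations and renumbers the operators in the golden explain output above. A minimal, self-contained sketch of flipping the same property programmatically (only the property name is taken from this diff; the demo class around it is illustrative):

import org.apache.hadoop.conf.Configuration;

public class DisableVectorizationDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Same property the .q files now set before running their EXPLAIN queries.
    conf.setBoolean("hive.vectorized.execution.enabled", false);
    System.out.println(conf.get("hive.vectorized.execution.enabled")); // prints: false
  }
}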



[2/3] hive git commit: HIVE-19350: Vectorization: Turn off vectorization for explainuser_1.q / spark_explainuser_1 (Matt McCline, reviewed by Vineet Garg)

2018-04-28 Thread mmccline
http://git-wip-us.apache.org/repos/asf/hive/blob/fc750ed3/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
index 9827d3b..872f9c9 100644
--- a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
@@ -119,11 +119,11 @@ Stage-3
   Number of rows:100
   Select Operator [SEL_4] (rows=100 width=178)
 Output:["_col0","_col1"]
-  <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized, llap
-PARTITION_ONLY_SHUFFLE [RS_10]
-  Limit [LIM_9] (rows=100 width=178)
+  <-Map 1 [CUSTOM_SIMPLE_EDGE] llap
+PARTITION_ONLY_SHUFFLE [RS_3]
+  Limit [LIM_2] (rows=100 width=178)
 Number of rows:100
-Select Operator [SEL_8] (rows=500 width=178)
+Select Operator [SEL_1] (rows=500 width=178)
   Output:["_col0","_col1"]
   TableScan [TS_0] (rows=500 width=178)
 
default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
@@ -158,15 +158,15 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 2 vectorized, llap
-  File Output Operator [FS_14]
-Group By Operator [GBY_13] (rows=1 width=16)
+  Reducer 2 llap
+  File Output Operator [FS_8]
+Group By Operator [GBY_6] (rows=1 width=16)
   
Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
-<-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized, llap
-  PARTITION_ONLY_SHUFFLE [RS_12]
-Group By Operator [GBY_11] (rows=1 width=16)
+<-Map 1 [CUSTOM_SIMPLE_EDGE] llap
+  PARTITION_ONLY_SHUFFLE [RS_5]
+Group By Operator [GBY_4] (rows=1 width=16)
   Output:["_col0","_col1"],aggregations:["sum(_col0)","sum(_col1)"]
-  Select Operator [SEL_10] (rows=500 width=95)
+  Select Operator [SEL_2] (rows=500 width=95)
 Output:["_col0","_col1"]
 TableScan [TS_0] (rows=500 width=95)
   
default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
@@ -192,15 +192,15 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 2 vectorized, llap
-  File Output Operator [FS_13]
-Group By Operator [GBY_12] (rows=1 width=8)
+  Reducer 2 llap
+  File Output Operator [FS_7]
+Group By Operator [GBY_5] (rows=1 width=8)
   Output:["_col0"],aggregations:["count(VALUE._col0)"]
-<-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized, llap
-  PARTITION_ONLY_SHUFFLE [RS_11]
-Group By Operator [GBY_10] (rows=1 width=8)
+<-Map 1 [CUSTOM_SIMPLE_EDGE] llap
+  PARTITION_ONLY_SHUFFLE [RS_4]
+Group By Operator [GBY_3] (rows=1 width=8)
   Output:["_col0"],aggregations:["count()"]
-  Select Operator [SEL_9] (rows=500 width=102)
+  Select Operator [SEL_2] (rows=500 width=102)
 TableScan [TS_0] (rows=500 width=102)
   
default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:COMPLETE
 
@@ -217,15 +217,15 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 2 vectorized, llap
-  File Output Operator [FS_14]
-Group By Operator [GBY_13] (rows=1 width=16)
+  Reducer 2 llap
+  File Output Operator [FS_8]
+Group By Operator [GBY_6] (rows=1 width=16)
   
Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
-<-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized, llap
-  PARTITION_ONLY_SHUFFLE [RS_12]
-Group By Operator [GBY_11] (rows=1 width=16)
+<-Map 1 [CUSTOM_SIMPLE_EDGE] llap
+  PARTITION_ONLY_SHUFFLE [RS_5]
+Group By Operator [GBY_4] (rows=1 width=16)
   Output:["_col0","_col1"],aggregations:["sum(_col0)","sum(_col1)"]
-  Select Operator [SEL_10] (rows=500 width=95)
+  Select Operator [SEL_2] (rows=500 width=95)
 Output:["_col0","_col1"]
 TableScan [TS_0] (rows=500 width=95)
   
default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
@@ -269,9 +269,9 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 3 vectorized, llap
-  File Output Operator [FS_26]
-Group By Operator [GBY_25] (rows=1 width=8)
+  Reducer 3 llap
+  File Output Operator [F

[3/3] hive git commit: HIVE-19350: Vectorization: Turn off vectorization for explainuser_1.q / spark_explainuser_1 (Matt McCline, reviewed by Vineet Garg)

2018-04-28 Thread mmccline
HIVE-19350: Vectorization: Turn off vectorization for explainuser_1.q / spark_explainuser_1 (Matt McCline, reviewed by Vineet Garg)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fc750ed3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fc750ed3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fc750ed3

Branch: refs/heads/master
Commit: fc750ed38570940df567fe94297c44387d053301
Parents: 8349dbd
Author: Matt McCline 
Authored: Sun Apr 29 01:45:13 2018 -0500
Committer: Matt McCline 
Committed: Sun Apr 29 01:45:13 2018 -0500

--
 .../test/queries/clientpositive/explainuser_1.q |1 +
 .../clientpositive/spark_explainuser_1.q|1 +
 .../clientpositive/llap/explainuser_1.q.out | 2408 +-
 .../spark/spark_explainuser_1.q.out | 2288 -
 4 files changed, 2350 insertions(+), 2348 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/fc750ed3/ql/src/test/queries/clientpositive/explainuser_1.q
--
diff --git a/ql/src/test/queries/clientpositive/explainuser_1.q b/ql/src/test/queries/clientpositive/explainuser_1.q
index 9675989..0772fb9 100644
--- a/ql/src/test/queries/clientpositive/explainuser_1.q
+++ b/ql/src/test/queries/clientpositive/explainuser_1.q
@@ -7,6 +7,7 @@
 --! qt:dataset:cbo_t3
 --! qt:dataset:cbo_t2
 --! qt:dataset:cbo_t1
+set hive.vectorized.execution.enabled=false;
 set hive.strict.checks.bucketing=false;
 
 set hive.mapred.mode=nonstrict;

http://git-wip-us.apache.org/repos/asf/hive/blob/fc750ed3/ql/src/test/queries/clientpositive/spark_explainuser_1.q
--
diff --git a/ql/src/test/queries/clientpositive/spark_explainuser_1.q b/ql/src/test/queries/clientpositive/spark_explainuser_1.q
index 92e6554..7a11665 100644
--- a/ql/src/test/queries/clientpositive/spark_explainuser_1.q
+++ b/ql/src/test/queries/clientpositive/spark_explainuser_1.q
@@ -7,6 +7,7 @@
 --! qt:dataset:cbo_t3
 --! qt:dataset:cbo_t2
 --! qt:dataset:cbo_t1
+set hive.vectorized.execution.enabled=false;
 set hive.strict.checks.bucketing=false;
 
 set hive.mapred.mode=nonstrict;



[1/3] hive git commit: HIVE-19350: Vectorization: Turn off vectorization for explainuser_1.q / spark_explainuser_1 (Matt McCline, reviewed by Vineet Garg)

2018-04-28 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/master 8349dbde5 -> fc750ed38


http://git-wip-us.apache.org/repos/asf/hive/blob/fc750ed3/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
index ada2eb4..4a36dd4 100644
--- a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
@@ -50,10 +50,10 @@ Stage-2
   Move Operator
 table:{"name:":"default.src_orc_merge_test_part"}
 Stage-1
-  Map 1 vectorized
-  File Output Operator [FS_5]
+  Map 1
+  File Output Operator [FS_3]
 table:{"name:":"default.src_orc_merge_test_part"}
-Select Operator [SEL_4] (rows=500 width=10)
+Select Operator [SEL_1] (rows=500 width=10)
   Output:["_col0","_col1"]
   TableScan [TS_0] (rows=500 width=10)
 default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
@@ -83,20 +83,20 @@ Stage-2
   Move Operator
 table:{"name:":"default.src_orc_merge_test_part"}
 Stage-1
-  Reducer 2 vectorized
-  File Output Operator [FS_14]
+  Reducer 2
+  File Output Operator [FS_7]
 table:{"name:":"default.src_orc_merge_test_part"}
-Select Operator [SEL_13] (rows=100 width=10)
+Select Operator [SEL_6] (rows=100 width=10)
   Output:["_col0","_col1"]
-  Limit [LIM_12] (rows=100 width=10)
+  Limit [LIM_5] (rows=100 width=10)
 Number of rows:100
-Select Operator [SEL_11] (rows=100 width=10)
+Select Operator [SEL_4] (rows=100 width=10)
   Output:["_col0","_col1"]
-<-Map 1 [GROUP] vectorized
-  GROUP [RS_10]
-Limit [LIM_9] (rows=100 width=10)
+<-Map 1 [GROUP]
+  GROUP [RS_3]
+Limit [LIM_2] (rows=100 width=10)
   Number of rows:100
-  Select Operator [SEL_8] (rows=500 width=10)
+  Select Operator [SEL_1] (rows=500 width=10)
 Output:["_col0","_col1"]
 TableScan [TS_0] (rows=500 width=10)
   
default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
@@ -124,15 +124,15 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 2 vectorized
-  File Output Operator [FS_14]
-Group By Operator [GBY_13] (rows=1 width=16)
+  Reducer 2
+  File Output Operator [FS_8]
+Group By Operator [GBY_6] (rows=1 width=16)
   
Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
-<-Map 1 [GROUP] vectorized
-  GROUP [RS_12]
-Group By Operator [GBY_11] (rows=1 width=16)
+<-Map 1 [GROUP]
+  GROUP [RS_5]
+Group By Operator [GBY_4] (rows=1 width=16)
   Output:["_col0","_col1"],aggregations:["sum(_col0)","sum(_col1)"]
-  Select Operator [SEL_10] (rows=500 width=94)
+  Select Operator [SEL_2] (rows=500 width=94)
 Output:["_col0","_col1"]
 TableScan [TS_0] (rows=500 width=94)
   
default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
@@ -158,15 +158,15 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 2 vectorized
-  File Output Operator [FS_13]
-Group By Operator [GBY_12] (rows=1 width=8)
+  Reducer 2
+  File Output Operator [FS_7]
+Group By Operator [GBY_5] (rows=1 width=8)
   Output:["_col0"],aggregations:["count(VALUE._col0)"]
-<-Map 1 [GROUP] vectorized
-  GROUP [RS_11]
-Group By Operator [GBY_10] (rows=1 width=8)
+<-Map 1 [GROUP]
+  GROUP [RS_4]
+Group By Operator [GBY_3] (rows=1 width=8)
   Output:["_col0"],aggregations:["count()"]
-  Select Operator [SEL_9] (rows=500 width=94)
+  Select Operator [SEL_2] (rows=500 width=94)
 TableScan [TS_0] (rows=500 width=94)
   
default@src_orc_merge_test_part,src_orc_merge_test_part,Tbl:COMPLETE,Col:NONE
 
@@ -183,15 +183,15 @@ Stage-0
   Fetch Operator
 limit:-1
 Stage-1
-  Reducer 2 vectorized
-  File Output Operator [FS_14]
-Group By Operator [GBY_13] (rows=1 width=16)
+  Reducer 2
+  File Output Operator [FS_8]
+Group By Operator [GBY_6] (rows=1 width=16)
   
Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
-<-Map 1 [GROUP] vectorized
-  GROUP [RS_12]
-

hive git commit: HIVE-19270 - TestAcidOnTez tests are failing (Eugene Koifman, reviewed by Sankar Hariappan)

2018-04-28 Thread ekoifman
Repository: hive
Updated Branches:
  refs/heads/branch-3 e08cc6e6c -> 5b4ae7417


HIVE-19270 - TestAcidOnTez tests are failing (Eugene Koifman, reviewed by Sankar Hariappan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5b4ae741
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5b4ae741
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5b4ae741

Branch: refs/heads/branch-3
Commit: 5b4ae7417fec1f1d7288d4c5c84196c4e20b9ffc
Parents: e08cc6e
Author: Eugene Koifman 
Authored: Sat Apr 28 11:43:00 2018 -0700
Committer: Eugene Koifman 
Committed: Sat Apr 28 11:51:13 2018 -0700

--
 .../org/apache/hadoop/hive/ql/TestAcidOnTez.java| 16 
 1 file changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/5b4ae741/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
index 3227b99..ac28e43 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
@@ -220,9 +220,9 @@ public class TestAcidOnTez {
 /*
 * Expected result 0th entry i the RecordIdentifier + data.  1st entry file 
before compact*/
 String expected[][] = {
-  {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":0}\t1\t2", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
+  {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":0}\t5\t6", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
   {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":1}\t3\t4", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
-  {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":2}\t5\t6", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
+  {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":2}\t1\t2", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
   {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}\t9\t10", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "2/00_0"},
   {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":4}\t7\t8", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "2/00_0"},
   {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":5}\t5\t6", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "2/00_0"},
@@ -242,8 +242,8 @@ public class TestAcidOnTez {
   LOG.warn(s);
 }
 String[][] expected2 = {
-  {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":0}\t1\t2", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
   {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":1}\t3\t4", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
+  {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":2}\t1\t2", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
   {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}\t9\t10", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "2/00_0"},
   {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t70\t80", 
"delta_001_001_/bucket_0"}
 };
@@ -454,9 +454,9 @@ public class TestAcidOnTez {
 /*
 * Expected result 0th entry is the RecordIdentifier + data.  1st entry 
file before compact*/
 String expected[][] = {
-  {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", 
"/delta_001_001_0001/bucket_0"},
+  {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t5\t6", 
"/delta_001_001_0001/bucket_0"},
   {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", 
"/delta_001_001_0001/bucket_0"},
-  {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":2}\t5\t6", 
"/delta_001_001_0001/bucket_0"},
+  {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":2}\t1\t2", 
"/delta_001_001_0001/bucket_0"},
   {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":0}\t9\t10", 
"/delta_001_001_0002/bucket_0"},
   {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":1}\t7\t8", 
"/delta_001_001_0002/bucket_0"},
   {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":2}\t5\t6", 
"/delta_001_001_0002/bucket_0"},
@@ -476,8 +476,8 @@ public class TestAcidOnTez {
   LOG.warn(s);
 }
 String[][] expected2 = {
-  {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", 
"/delta_001_001_0001/bucket_0"},
   {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", 
"/delta_001_001_0001/bucket_0"},
+  {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":2}\t1\t2", 
"

[1/4] hive git commit: HIVE-19239 : Check for possible null timestamp fields during SerDe from Druid events (Slim Bouguerra via Ashutosh Chauhan)

2018-04-28 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/branch-3 0930154fd -> e08cc6e6c


HIVE-19239 : Check for possible null timestamp fields during SerDe from Druid events (Slim Bouguerra via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1b0b86c8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1b0b86c8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1b0b86c8

Branch: refs/heads/branch-3
Commit: 1b0b86c8beec0d39ec4da31bdc8ecb5854a0cc87
Parents: 0930154
Author: Slim Bouguerra 
Authored: Wed Apr 18 11:24:00 2018 -0700
Committer: Ashutosh Chauhan 
Committed: Sat Apr 28 10:48:04 2018 -0700

--
 .../serde/DruidGroupByQueryRecordReader.java| 22 +---
 .../serde/DruidTimeseriesQueryRecordReader.java | 16 +++---
 2 files changed, 17 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1b0b86c8/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
--
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
index 765f1cb..00a4b72 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
@@ -17,31 +17,29 @@
  */
 package org.apache.hadoop.hive.druid.serde;
 
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
+import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.JavaType;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.metamx.http.client.HttpClient;
 import io.druid.data.input.MapBasedRow;
+import io.druid.data.input.Row;
 import io.druid.query.dimension.DimensionSpec;
 import io.druid.query.dimension.ExtractionDimensionSpec;
 import io.druid.query.extraction.TimeFormatExtractionFn;
+import io.druid.query.groupby.GroupByQuery;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.InputSplit;
-
-import com.fasterxml.jackson.core.type.TypeReference;
-
-import io.druid.data.input.Row;
-import io.druid.query.groupby.GroupByQuery;
 import org.joda.time.format.ISODateTimeFormat;
 
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
 import static 
org.apache.hadoop.hive.druid.serde.DruidSerDeUtils.ISO_TIME_FORMAT;
 
 /**
@@ -133,7 +131,7 @@ public class DruidGroupByQueryRecordReader
 DruidWritable value = new DruidWritable();
 // 1) The timestamp column
 value.getValue().put(DruidStorageHandlerUtils.EVENT_TIMESTAMP_COLUMN,
-currentRow.getTimestamp().getMillis()
+currentRow.getTimestamp() == null ? null : 
currentRow.getTimestamp().getMillis()
 );
 // 2) The dimension columns
 value.getValue().putAll(currentEvent);
@@ -147,7 +145,7 @@ public class DruidGroupByQueryRecordReader
   value.getValue().clear();
   // 1) The timestamp column
   value.getValue().put(DruidStorageHandlerUtils.EVENT_TIMESTAMP_COLUMN,
-  currentRow.getTimestamp().getMillis()
+  currentRow.getTimestamp() == null ? null : 
currentRow.getTimestamp().getMillis()
   );
   // 2) The dimension columns
   value.getValue().putAll(currentEvent);
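
The guard above replaces an unconditional currentRow.getTimestamp().getMillis() with a null check, so a Druid row without a timestamp yields a null column value instead of a NullPointerException. A standalone sketch of the same pattern (Joda-Time is already used by this reader, as the org.joda.time import above shows; the demo class itself is illustrative, not part of the commit):

import org.joda.time.DateTime;

public class NullSafeTimestampDemo {
  // Mirrors the guard added in this commit: return null rather than throwing
  // an NPE when the event carries no timestamp.
  static Long toMillis(DateTime timestamp) {
    return timestamp == null ? null : timestamp.getMillis();
  }

  public static void main(String[] args) {
    System.out.println(toMillis(new DateTime(2018, 4, 28, 0, 0))); // epoch millis
    System.out.println(toMillis(null));                            // null, no exception
  }
}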

http://git-wip-us.apache.org/repos/asf/hive/blob/1b0b86c8/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
--
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
index f07f212..d726248 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
@@ -17,17 +17,15 @@
  */
 package org.apache.hadoop.hive.druid.serde;
 
-import java.io.IOException;
-
-import com.fasterxml.jackson.databind.JavaType;
-import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
-import org.apache.hadoop.io.NullWritable;
-
 import com.fasterxml.jackson.core.type.TypeReference;
-
+import com.fasterxml.jackson.databind.JavaType;
 import io.d

[4/4] hive git commit: HIVE-19288 : Implement protobuf logging hive hook. (Harish JP via Ashutosh Chauhan)

2018-04-28 Thread hashutosh
HIVE-19288 : Implement protobuf logging hive hook. (Harish JP via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e08cc6e6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e08cc6e6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e08cc6e6

Branch: refs/heads/branch-3
Commit: e08cc6e6cd9eb501cf88ca4b777f1e0e8d46b281
Parents: 1b0b86c
Author: Harish Jaiprakash 
Authored: Sat Apr 28 10:47:09 2018 -0700
Committer: Ashutosh Chauhan 
Committed: Sat Apr 28 10:48:17 2018 -0700

--
 .../hadoop/hive/llap/TestAsyncPbRpcProxy.java   |4 +-
 ql/pom.xml  |   34 +
 .../hive/ql/hooks/proto/HiveHookEvents.java | 2917 ++
 .../hive/ql/hooks/DatePartitionedLogger.java|  167 +
 .../hive/ql/hooks/HiveProtoLoggingHook.java |  493 +++
 .../hive/ql/hooks/ProtoMessageReader.java   |   66 +
 .../hive/ql/hooks/ProtoMessageWritable.java |  101 +
 .../hive/ql/hooks/ProtoMessageWriter.java   |   71 +
 ql/src/protobuf/HiveEvents.proto|   38 +
 .../hive/ql/hooks/TestHiveProtoLoggingHook.java |  170 +
 10 files changed, 4060 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/e08cc6e6/llap-client/src/test/org/apache/hadoop/hive/llap/TestAsyncPbRpcProxy.java
--
diff --git a/llap-client/src/test/org/apache/hadoop/hive/llap/TestAsyncPbRpcProxy.java b/llap-client/src/test/org/apache/hadoop/hive/llap/TestAsyncPbRpcProxy.java
index b152f1c..9ae 100644
--- a/llap-client/src/test/org/apache/hadoop/hive/llap/TestAsyncPbRpcProxy.java
+++ b/llap-client/src/test/org/apache/hadoop/hive/llap/TestAsyncPbRpcProxy.java
@@ -112,7 +112,9 @@ public class TestAsyncPbRpcProxy {
   super(numThreads, 1);
 }
 
-protected void submitToExecutor(LlapProtocolClientProxy.CallableRequest 
request, LlapNodeId nodeId) {
+@Override
+protected  void submitToExecutor(
+LlapProtocolClientProxy.CallableRequest request, LlapNodeId 
nodeId) {
   numSubmissionsCounters++;
   MutableInt nodeCount = numInvocationsPerNode.get(nodeId);
   if (nodeCount == null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/e08cc6e6/ql/pom.xml
--
diff --git a/ql/pom.xml b/ql/pom.xml
index 31b0a65..46ce96a 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -795,6 +795,39 @@
   

 
+
+  protobuf
+  
+
+  
+org.apache.maven.plugins
+maven-antrun-plugin
+
+  
+generate-protobuf-sources
+generate-sources
+
+  
+
+
+Building ql Protobuf
+
+
+  
+  
+  
+
+  
+
+
+  run
+
+  
+
+  
+
+  
+
   
 
   
@@ -957,6 +990,7 @@
   
 src/gen/thrift/gen-javabean
 
${project.build.directory}/generated-sources/java
+src/gen/protobuf/gen-java
   
 
   



[3/4] hive git commit: HIVE-19288 : Implement protobuf logging hive hook. (Harish JP via Ashutosh Chauhan)

2018-04-28 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/e08cc6e6/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/hooks/proto/HiveHookEvents.java
--
diff --git a/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/hooks/proto/HiveHookEvents.java b/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/hooks/proto/HiveHookEvents.java
new file mode 100644
index 000..6adda72
--- /dev/null
+++ b/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/hooks/proto/HiveHookEvents.java
@@ -0,0 +1,2917 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: HiveEvents.proto
+
+package org.apache.hadoop.hive.ql.hooks.proto;
+
+public final class HiveHookEvents {
+  private HiveHookEvents() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface MapFieldEntryOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// optional string key = 1;
+/**
+ * optional string key = 1;
+ */
+boolean hasKey();
+/**
+ * optional string key = 1;
+ */
+java.lang.String getKey();
+/**
+ * optional string key = 1;
+ */
+com.google.protobuf.ByteString
+getKeyBytes();
+
+// optional string value = 2;
+/**
+ * optional string value = 2;
+ */
+boolean hasValue();
+/**
+ * optional string value = 2;
+ */
+java.lang.String getValue();
+/**
+ * optional string value = 2;
+ */
+com.google.protobuf.ByteString
+getValueBytes();
+  }
+  /**
+   * Protobuf type {@code org.apache.hadoop.hive.ql.hooks.proto.MapFieldEntry}
+   */
+  public static final class MapFieldEntry extends
+  com.google.protobuf.GeneratedMessage
+  implements MapFieldEntryOrBuilder {
+// Use MapFieldEntry.newBuilder() to construct.
+private MapFieldEntry(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private MapFieldEntry(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final MapFieldEntry defaultInstance;
+public static MapFieldEntry getDefaultInstance() {
+  return defaultInstance;
+}
+
+public MapFieldEntry getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private MapFieldEntry(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  key_ = input.readBytes();
+  break;
+}
+case 18: {
+  bitField0_ |= 0x0002;
+  value_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_org_apache_hadoop_hive_ql_hooks_proto_MapFieldEntry_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_org_apache_hadoop_hive_ql_hooks_proto_MapFieldEntry_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.class, 
org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.Builder.class);
+}
+
+public stat
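
The excerpt above covers only the MapFieldEntryOrBuilder interface and the start of the generated MapFieldEntry class: a simple optional key/value string pair used by the hook events. As a rough usage sketch, newBuilder() is named in the generated comments and getKey()/getValue() are declared in the interface above, while setKey()/setValue() are assumed here to be the usual protobuf builder setters for the two optional string fields (they are not visible in this truncated excerpt):

import org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry;

public class MapFieldEntryDemo {
  public static void main(String[] args) {
    MapFieldEntry entry = MapFieldEntry.newBuilder()
        .setKey("queryId")           // illustrative key
        .setValue("hive_20180428")   // illustrative value
        .build();
    System.out.println(entry.getKey() + " = " + entry.getValue());
  }
}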

[2/4] hive git commit: HIVE-19288 : Implement protobuf logging hive hook. (Harish JP via Ashutosh Chauhan)

2018-04-28 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/e08cc6e6/ql/src/java/org/apache/hadoop/hive/ql/hooks/DatePartitionedLogger.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/DatePartitionedLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/DatePartitionedLogger.java
new file mode 100644
index 000..c9d1b93
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/DatePartitionedLogger.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.hooks;
+
+import java.io.IOException;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.ZoneOffset;
+import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.yarn.util.Clock;
+
+import com.google.protobuf.MessageLite;
+import com.google.protobuf.Parser;
+
+/**
+ * Class to create proto reader and writer for a date partitioned directory 
structure.
+ *
+ * @param  The proto message type.
+ */
+public class DatePartitionedLogger {
+  // Everyone has permission to write, but with sticky set so that delete is 
restricted.
+  // This is required, since the path is same for all users and everyone 
writes into it.
+  private static final FsPermission DIR_PERMISSION = 
FsPermission.createImmutable((short)01777);
+
+  private final Parser parser;
+  private final Path basePath;
+  private final Configuration conf;
+  private final Clock clock;
+  private final FileSystem fileSystem;
+
+  public DatePartitionedLogger(Parser parser, Path baseDir, Configuration 
conf, Clock clock)
+  throws IOException {
+this.conf = conf;
+this.clock = clock;
+this.parser = parser;
+this.fileSystem = baseDir.getFileSystem(conf);
+if (!fileSystem.exists(baseDir)) {
+  fileSystem.mkdirs(baseDir);
+  fileSystem.setPermission(baseDir, DIR_PERMISSION);
+}
+this.basePath = fileSystem.resolvePath(baseDir);
+  }
+
+  /**
+   * Creates a writer for the given fileName, with date as today.
+   */
+  public ProtoMessageWriter getWriter(String fileName) throws IOException {
+Path filePath = getPathForDate(getNow().toLocalDate(), fileName);
+return new ProtoMessageWriter<>(conf, filePath, parser);
+  }
+
+  /**
+   * Creates a reader for the given filePath, no validation is done.
+   */
+  public ProtoMessageReader getReader(Path filePath) throws IOException {
+return new ProtoMessageReader<>(conf, filePath, parser);
+  }
+
+  /**
+   * Create a path for the given date and fileName. This can be used to create 
a reader.
+   */
+  public Path getPathForDate(LocalDate date, String fileName) throws 
IOException {
+Path path = new Path(basePath, getDirForDate(date));
+if (!fileSystem.exists(path)) {
+  fileSystem.mkdirs(path);
+  fileSystem.setPermission(path, DIR_PERMISSION);
+}
+return new Path(path, fileName);
+  }
+
+  /**
+   * Extract the date from the directory name, this should be a directory 
created by this class.
+   */
+  public LocalDate getDateFromDir(String dirName) {
+if (!dirName.startsWith("date=")) {
+  throw new IllegalArgumentException("Invalid directory: "+ dirName);
+}
+return LocalDate.parse(dirName.substring(5), 
DateTimeFormatter.ISO_LOCAL_DATE);
+  }
+
+  /**
+   * Returns the directory name for a given date.
+   */
+  public String getDirForDate(LocalDate date) {
+return "date=" + DateTimeFormatter.ISO_LOCAL_DATE.format(date);
+  }
+
+  /**
+   * Find next available directory, after the given directory.
+   */
+  public String getNextDirectory(String currentDir) throws IOException {
+// Fast check, if the next day directory exists return it.
+String nextDate = getDirForDate(getDateFromDir(currentDir).plusDays(1));
+if (fileSystem.exists(new Path(basePath, nextDate))) {
+  return nextDate;
+}
+// Have to scan the directory to find m
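
The directory layout used by DatePartitionedLogger is simply "date=" plus an ISO local date, as getDirForDate()/getDateFromDir() above show. A self-contained sketch of that round trip (plain java.time, no Hive or Hadoop dependencies; the demo class is illustrative only):

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;

public class DatePartitionDemo {
  // Mirrors getDirForDate(): "date=" + ISO_LOCAL_DATE
  static String dirForDate(LocalDate date) {
    return "date=" + DateTimeFormatter.ISO_LOCAL_DATE.format(date);
  }

  // Mirrors getDateFromDir(): strip the "date=" prefix and parse the remainder
  static LocalDate dateFromDir(String dirName) {
    if (!dirName.startsWith("date=")) {
      throw new IllegalArgumentException("Invalid directory: " + dirName);
    }
    return LocalDate.parse(dirName.substring(5), DateTimeFormatter.ISO_LOCAL_DATE);
  }

  public static void main(String[] args) {
    String dir = dirForDate(LocalDate.of(2018, 4, 28));
    System.out.println(dir);                                      // date=2018-04-28
    System.out.println(dateFromDir(dir));                         // 2018-04-28
    System.out.println(dirForDate(dateFromDir(dir).plusDays(1))); // next day's directory
  }
}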

[2/3] hive git commit: HIVE-19288 : Implement protobuf logging hive hook. (Harish JP via Ashutosh Chauhan)

2018-04-28 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/8349dbde/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/hooks/proto/HiveHookEvents.java
--
diff --git a/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/hooks/proto/HiveHookEvents.java b/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/hooks/proto/HiveHookEvents.java
new file mode 100644
index 000..6adda72
--- /dev/null
+++ b/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/hooks/proto/HiveHookEvents.java
@@ -0,0 +1,2917 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: HiveEvents.proto
+
+package org.apache.hadoop.hive.ql.hooks.proto;
+
+public final class HiveHookEvents {
+  private HiveHookEvents() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface MapFieldEntryOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// optional string key = 1;
+/**
+ * optional string key = 1;
+ */
+boolean hasKey();
+/**
+ * optional string key = 1;
+ */
+java.lang.String getKey();
+/**
+ * optional string key = 1;
+ */
+com.google.protobuf.ByteString
+getKeyBytes();
+
+// optional string value = 2;
+/**
+ * optional string value = 2;
+ */
+boolean hasValue();
+/**
+ * optional string value = 2;
+ */
+java.lang.String getValue();
+/**
+ * optional string value = 2;
+ */
+com.google.protobuf.ByteString
+getValueBytes();
+  }
+  /**
+   * Protobuf type {@code org.apache.hadoop.hive.ql.hooks.proto.MapFieldEntry}
+   */
+  public static final class MapFieldEntry extends
+  com.google.protobuf.GeneratedMessage
+  implements MapFieldEntryOrBuilder {
+// Use MapFieldEntry.newBuilder() to construct.
+private MapFieldEntry(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private MapFieldEntry(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final MapFieldEntry defaultInstance;
+public static MapFieldEntry getDefaultInstance() {
+  return defaultInstance;
+}
+
+public MapFieldEntry getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private MapFieldEntry(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  key_ = input.readBytes();
+  break;
+}
+case 18: {
+  bitField0_ |= 0x0002;
+  value_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_org_apache_hadoop_hive_ql_hooks_proto_MapFieldEntry_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_org_apache_hadoop_hive_ql_hooks_proto_MapFieldEntry_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.class, 
org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.Builder.class);
+}
+
+public stat

[3/3] hive git commit: HIVE-19288 : Implement protobuf logging hive hook. (Harish JP via Ashutosh Chauhan)

2018-04-28 Thread hashutosh
HIVE-19288 : Implement protobuf logging hive hook. (Harish JP via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8349dbde
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8349dbde
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8349dbde

Branch: refs/heads/master
Commit: 8349dbde55f479167e43cfd1f089e131d4271e5b
Parents: 3a0e4dd
Author: Harish Jaiprakash 
Authored: Sat Apr 28 10:47:09 2018 -0700
Committer: Ashutosh Chauhan 
Committed: Sat Apr 28 10:47:09 2018 -0700

--
 .../hadoop/hive/llap/TestAsyncPbRpcProxy.java   |4 +-
 ql/pom.xml  |   34 +
 .../hive/ql/hooks/proto/HiveHookEvents.java | 2917 ++
 .../hive/ql/hooks/DatePartitionedLogger.java|  167 +
 .../hive/ql/hooks/HiveProtoLoggingHook.java |  493 +++
 .../hive/ql/hooks/ProtoMessageReader.java   |   66 +
 .../hive/ql/hooks/ProtoMessageWritable.java |  101 +
 .../hive/ql/hooks/ProtoMessageWriter.java   |   71 +
 ql/src/protobuf/HiveEvents.proto|   38 +
 .../hive/ql/hooks/TestHiveProtoLoggingHook.java |  170 +
 10 files changed, 4060 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8349dbde/llap-client/src/test/org/apache/hadoop/hive/llap/TestAsyncPbRpcProxy.java
--
diff --git a/llap-client/src/test/org/apache/hadoop/hive/llap/TestAsyncPbRpcProxy.java b/llap-client/src/test/org/apache/hadoop/hive/llap/TestAsyncPbRpcProxy.java
index b152f1c..9ae 100644
--- a/llap-client/src/test/org/apache/hadoop/hive/llap/TestAsyncPbRpcProxy.java
+++ b/llap-client/src/test/org/apache/hadoop/hive/llap/TestAsyncPbRpcProxy.java
@@ -112,7 +112,9 @@ public class TestAsyncPbRpcProxy {
   super(numThreads, 1);
 }
 
-protected void submitToExecutor(LlapProtocolClientProxy.CallableRequest 
request, LlapNodeId nodeId) {
+@Override
+protected  void submitToExecutor(
+LlapProtocolClientProxy.CallableRequest request, LlapNodeId 
nodeId) {
   numSubmissionsCounters++;
   MutableInt nodeCount = numInvocationsPerNode.get(nodeId);
   if (nodeCount == null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/8349dbde/ql/pom.xml
--
diff --git a/ql/pom.xml b/ql/pom.xml
index 165610f..867a38a 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -795,6 +795,39 @@
   

 
+
+  protobuf
+  
+
+  
+org.apache.maven.plugins
+maven-antrun-plugin
+
+  
+generate-protobuf-sources
+generate-sources
+
+  
+
+
+Building ql Protobuf
+
+
+  
+  
+  
+
+  
+
+
+  run
+
+  
+
+  
+
+  
+
   
 
   
@@ -957,6 +990,7 @@
   
 src/gen/thrift/gen-javabean
 
${project.build.directory}/generated-sources/java
+src/gen/protobuf/gen-java
   
 
   



[1/3] hive git commit: HIVE-19288 : Implement protobuf logging hive hook. (Harish JP via Ashutosh Chauhan)

2018-04-28 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master 3a0e4dd40 -> 8349dbde5


http://git-wip-us.apache.org/repos/asf/hive/blob/8349dbde/ql/src/java/org/apache/hadoop/hive/ql/hooks/DatePartitionedLogger.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/DatePartitionedLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/DatePartitionedLogger.java
new file mode 100644
index 000..c9d1b93
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/DatePartitionedLogger.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.hooks;
+
+import java.io.IOException;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.ZoneOffset;
+import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.yarn.util.Clock;
+
+import com.google.protobuf.MessageLite;
+import com.google.protobuf.Parser;
+
+/**
+ * Class to create proto reader and writer for a date partitioned directory 
structure.
+ *
+ * @param  The proto message type.
+ */
+public class DatePartitionedLogger {
+  // Everyone has permission to write, but with sticky set so that delete is 
restricted.
+  // This is required, since the path is same for all users and everyone 
writes into it.
+  private static final FsPermission DIR_PERMISSION = 
FsPermission.createImmutable((short)01777);
+
+  private final Parser parser;
+  private final Path basePath;
+  private final Configuration conf;
+  private final Clock clock;
+  private final FileSystem fileSystem;
+
+  public DatePartitionedLogger(Parser parser, Path baseDir, Configuration 
conf, Clock clock)
+  throws IOException {
+this.conf = conf;
+this.clock = clock;
+this.parser = parser;
+this.fileSystem = baseDir.getFileSystem(conf);
+if (!fileSystem.exists(baseDir)) {
+  fileSystem.mkdirs(baseDir);
+  fileSystem.setPermission(baseDir, DIR_PERMISSION);
+}
+this.basePath = fileSystem.resolvePath(baseDir);
+  }
+
+  /**
+   * Creates a writer for the given fileName, with date as today.
+   */
+  public ProtoMessageWriter getWriter(String fileName) throws IOException {
+Path filePath = getPathForDate(getNow().toLocalDate(), fileName);
+return new ProtoMessageWriter<>(conf, filePath, parser);
+  }
+
+  /**
+   * Creates a reader for the given filePath, no validation is done.
+   */
+  public ProtoMessageReader getReader(Path filePath) throws IOException {
+return new ProtoMessageReader<>(conf, filePath, parser);
+  }
+
+  /**
+   * Create a path for the given date and fileName. This can be used to create 
a reader.
+   */
+  public Path getPathForDate(LocalDate date, String fileName) throws 
IOException {
+Path path = new Path(basePath, getDirForDate(date));
+if (!fileSystem.exists(path)) {
+  fileSystem.mkdirs(path);
+  fileSystem.setPermission(path, DIR_PERMISSION);
+}
+return new Path(path, fileName);
+  }
+
+  /**
+   * Extract the date from the directory name, this should be a directory 
created by this class.
+   */
+  public LocalDate getDateFromDir(String dirName) {
+if (!dirName.startsWith("date=")) {
+  throw new IllegalArgumentException("Invalid directory: "+ dirName);
+}
+return LocalDate.parse(dirName.substring(5), 
DateTimeFormatter.ISO_LOCAL_DATE);
+  }
+
+  /**
+   * Returns the directory name for a given date.
+   */
+  public String getDirForDate(LocalDate date) {
+return "date=" + DateTimeFormatter.ISO_LOCAL_DATE.format(date);
+  }
+
+  /**
+   * Find next available directory, after the given directory.
+   */
+  public String getNextDirectory(String currentDir) throws IOException {
+// Fast check, if the next day directory exists return it.
+String nextDate = getDirForDate(getDateFromDir(currentDir).plusDays(1));
+if (fileSystem.exists(new Path(basePath, nextDate)

hive git commit: HIVE-19270 - TestAcidOnTez tests are failing (Eugene Koifman, reviewed by Sankar Hariappan)

2018-04-28 Thread ekoifman
Repository: hive
Updated Branches:
  refs/heads/master e388bc770 -> 3a0e4dd40


HIVE-19270 - TestAcidOnTez tests are failing (Eugene Koifman, reviewed by Sankar Hariappan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3a0e4dd4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3a0e4dd4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3a0e4dd4

Branch: refs/heads/master
Commit: 3a0e4dd4036516044a2e71ad0ef812ce0d169e1a
Parents: e388bc7
Author: Eugene Koifman 
Authored: Sat Apr 28 10:41:11 2018 -0700
Committer: Eugene Koifman 
Committed: Sat Apr 28 10:41:11 2018 -0700

--
 .../org/apache/hadoop/hive/ql/TestAcidOnTez.java| 16 
 1 file changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3a0e4dd4/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
index 3227b99..ac28e43 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java
@@ -220,9 +220,9 @@ public class TestAcidOnTez {
 /*
 * Expected result 0th entry i the RecordIdentifier + data.  1st entry file 
before compact*/
 String expected[][] = {
-  {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":0}\t1\t2", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
+  {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":0}\t5\t6", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
   {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":1}\t3\t4", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
-  {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":2}\t5\t6", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
+  {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":2}\t1\t2", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
   {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}\t9\t10", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "2/00_0"},
   {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":4}\t7\t8", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "2/00_0"},
   {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":5}\t5\t6", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "2/00_0"},
@@ -242,8 +242,8 @@ public class TestAcidOnTez {
   LOG.warn(s);
 }
 String[][] expected2 = {
-  {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":0}\t1\t2", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
   {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":1}\t3\t4", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
+  {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":2}\t1\t2", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "1/00_0"},
   {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}\t9\t10", 
AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "2/00_0"},
   {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t70\t80", 
"delta_001_001_/bucket_0"}
 };
@@ -454,9 +454,9 @@ public class TestAcidOnTez {
 /*
 * Expected result 0th entry is the RecordIdentifier + data.  1st entry 
file before compact*/
 String expected[][] = {
-  {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", 
"/delta_001_001_0001/bucket_0"},
+  {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t5\t6", 
"/delta_001_001_0001/bucket_0"},
   {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", 
"/delta_001_001_0001/bucket_0"},
-  {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":2}\t5\t6", 
"/delta_001_001_0001/bucket_0"},
+  {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":2}\t1\t2", 
"/delta_001_001_0001/bucket_0"},
   {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":0}\t9\t10", 
"/delta_001_001_0002/bucket_0"},
   {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":1}\t7\t8", 
"/delta_001_001_0002/bucket_0"},
   {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":2}\t5\t6", 
"/delta_001_001_0002/bucket_0"},
@@ -476,8 +476,8 @@ public class TestAcidOnTez {
   LOG.warn(s);
 }
 String[][] expected2 = {
-  {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", 
"/delta_001_001_0001/bucket_0"},
   {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", 
"/delta_001_001_0001/bucket_0"},
+  {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":2}\t1\t2", 
"/del