[24/51] [partial] hive git commit: HIVE-18778: Needs to capture input/output entities in explain (Daniel Dai, reviewed by Thejas Nair)

2018-09-27 Thread daijy
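In short, judging from the golden-file updates below: EXPLAIN now captures the read/write entities of the statement being explained, so the PREHOOK/POSTHOOK blocks in the .q.out files gain Input:/Output: lines. A minimal sketch of the effect (table names here are illustrative, not taken from the patch):

  EXPLAIN INSERT OVERWRITE TABLE dst SELECT key, value FROM src;
  -- before HIVE-18778 the hooks recorded only the query text and "type: QUERY"
  -- after, they also list the entities of the explained statement, e.g.
  --   PREHOOK: Input: default@src
  --   PREHOOK: Output: default@dst
  -- and for INSERT ... VALUES the synthetic source appears as an input:
  --   PREHOOK: Input: _dummy_database@_dummy_table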
http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out 
b/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
index 3954be5..e3906cc 100644
--- a/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
+++ b/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
@@ -14,8 +14,12 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@insert_into1_n0
 PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1_n0 values(default, 
DEFAULT)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1_n0
 POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1_n0 values(default, 
DEFAULT)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1_n0
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -149,8 +153,12 @@ POSTHOOK: type: TRUNCATETABLE
 POSTHOOK: Output: default@insert_into1_n0
 PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1_n0 values(234, dEfAULt)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1_n0
 POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1_n0 values(234, dEfAULt)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1_n0
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -284,8 +292,12 @@ POSTHOOK: type: TRUNCATETABLE
 POSTHOOK: Output: default@insert_into1_n0
 PREHOOK: query: explain insert into insert_into1_n0 values(default, 
3),(2,default)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1_n0
 POSTHOOK: query: explain insert into insert_into1_n0 values(default, 
3),(2,default)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1_n0
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -420,8 +432,12 @@ POSTHOOK: type: TRUNCATETABLE
 POSTHOOK: Output: default@insert_into1_n0
 PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1_n0(key) values(default)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1_n0
 POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1_n0(key) values(default)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1_n0
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -555,8 +571,12 @@ POSTHOOK: type: TRUNCATETABLE
 POSTHOOK: Output: default@insert_into1_n0
 PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1_n0(key, value) 
values(2,default)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1_n0
 POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1_n0(key, value) 
values(2,default)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1_n0
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -708,8 +728,12 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@insert_into1_n0
 PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1_n0 values(default, 
DEFAULT)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1_n0
 POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1_n0 values(default, 
DEFAULT)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1_n0
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -843,8 +867,12 @@ POSTHOOK: type: TRUNCATETABLE
 POSTHOOK: Output: default@insert_into1_n0
 PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1_n0 values(234, dEfAULt)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1_n0
 POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1_n0 values(234, dEfAULt)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@insert_into1_n0
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -978,8 +1006,12 @@ POSTHOOK: type: TRUNCATETABLE
 POSTHOOK: Output: default@insert_into1_n0
 PREHOOK: query: explain insert into insert_into1_n0 values(default, 
3),(2,default)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@insert_into1_n0
 POSTHOOK: query: explain insert into insert_into1_n0 values(default, 
3),(2,default)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: 

[32/51] [partial] hive git commit: HIVE-18778: Needs to capture input/output entities in explain (Daniel Dai, reviewed by Thejas Nair)

2018-09-27 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/llap/bucket3.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/bucket3.q.out 
b/ql/src/test/results/clientpositive/llap/bucket3.q.out
index ebcedc9..1f45b19 100644
--- a/ql/src/test/results/clientpositive/llap/bucket3.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket3.q.out
@@ -10,10 +10,14 @@ PREHOOK: query: explain extended
 insert overwrite table bucket3_1 partition (ds='1')
 select * from src
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket3_1@ds=1
 POSTHOOK: query: explain extended
 insert overwrite table bucket3_1 partition (ds='1')
 select * from src
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket3_1@ds=1
 OPTIMIZED SQL: SELECT `key`, `value`
 FROM `default`.`src`
 STAGE DEPENDENCIES:
@@ -269,9 +273,15 @@ POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).value SIMPLE 
[(src)src.FieldSchema(
 PREHOOK: query: explain
 select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket3_1
+PREHOOK: Input: default@bucket3_1@ds=1
+ A masked pattern was here 
 POSTHOOK: query: explain
 select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket3_1
+POSTHOOK: Input: default@bucket3_1@ds=1
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/llap/bucket4.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/bucket4.q.out 
b/ql/src/test/results/clientpositive/llap/bucket4.q.out
index be3f350..ec91b3e 100644
--- a/ql/src/test/results/clientpositive/llap/bucket4.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket4.q.out
@@ -10,10 +10,14 @@ PREHOOK: query: explain extended
 insert overwrite table bucket4_1
 select * from src
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket4_1
 POSTHOOK: query: explain extended
 insert overwrite table bucket4_1
 select * from src
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket4_1
 OPTIMIZED SQL: SELECT `key`, `value`
 FROM `default`.`src`
 STAGE DEPENDENCIES:
@@ -242,9 +246,13 @@ POSTHOOK: Lineage: bucket4_1.value SIMPLE 
[(src)src.FieldSchema(name:value, type
 PREHOOK: query: explain
 select * from bucket4_1 tablesample (bucket 1 out of 2) s
 PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket4_1
+ A masked pattern was here 
 POSTHOOK: query: explain
 select * from bucket4_1 tablesample (bucket 1 out of 2) s
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket4_1
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/llap/bucket5.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/bucket5.q.out 
b/ql/src/test/results/clientpositive/llap/bucket5.q.out
index ee444f2..bd56c35 100644
--- a/ql/src/test/results/clientpositive/llap/bucket5.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket5.q.out
@@ -19,11 +19,17 @@ FROM src
 INSERT OVERWRITE TABLE bucketed_table SELECT key, value
 INSERT OVERWRITE TABLE unbucketed_table SELECT key, value cluster by key
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucketed_table
+PREHOOK: Output: default@unbucketed_table
 POSTHOOK: query: EXPLAIN EXTENDED
 FROM src
 INSERT OVERWRITE TABLE bucketed_table SELECT key, value
 INSERT OVERWRITE TABLE unbucketed_table SELECT key, value cluster by key
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucketed_table
+POSTHOOK: Output: default@unbucketed_table
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-3 depends on stages: Stage-2, Stage-7, Stage-6, Stage-9

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/llap/bucket6.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/bucket6.q.out 
b/ql/src/test/results/clientpositive/llap/bucket6.q.out
index d0c1500..f8d920b 100644
--- a/ql/src/test/results/clientpositive/llap/bucket6.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket6.q.out
@@ -9,9 +9,21 @@ POSTHOOK: Output: default@src_bucket
 PREHOOK: query: explain
 insert into table src_bucket select key,value from srcpart
 PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11

[14/51] [partial] hive git commit: HIVE-18778: Needs to capture input/output entities in explain (Daniel Dai, reviewed by Thejas Nair)

2018-09-27 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out 
b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out
index d71107d..08796c3 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vecrow_part.q.out
@@ -60,9 +60,15 @@ col1 col2 col3 col4
 PREHOOK: query: explain vectorization detail
 select insert_num,part,a,b from part_add_int_permute_select_n11
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part_add_int_permute_select_n11
+PREHOOK: Input: default@part_add_int_permute_select_n11@part=1
+ A masked pattern was here 
 POSTHOOK: query: explain vectorization detail
 select insert_num,part,a,b from part_add_int_permute_select_n11
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_add_int_permute_select_n11
+POSTHOOK: Input: default@part_add_int_permute_select_n11@part=1
+ A masked pattern was here 
 Explain
 PLAN VECTORIZATION:
   enabled: true
@@ -219,9 +225,15 @@ col1 col2 col3 col4 col5
 PREHOOK: query: explain vectorization detail
 select insert_num,part,a,b from part_add_int_string_permute_select_n11
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part_add_int_string_permute_select_n11
+PREHOOK: Input: default@part_add_int_string_permute_select_n11@part=1
+ A masked pattern was here 
 POSTHOOK: query: explain vectorization detail
 select insert_num,part,a,b from part_add_int_string_permute_select_n11
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_add_int_string_permute_select_n11
+POSTHOOK: Input: default@part_add_int_string_permute_select_n11@part=1
+ A masked pattern was here 
 Explain
 PLAN VECTORIZATION:
   enabled: true
@@ -432,9 +444,15 @@ insert_num double1 double1 double1 _c4
 PREHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n11
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part_change_string_group_double_n11
+PREHOOK: Input: default@part_change_string_group_double_n11@part=1
+ A masked pattern was here 
 POSTHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n11
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_change_string_group_double_n11
+POSTHOOK: Input: default@part_change_string_group_double_n11@part=1
+ A masked pattern was here 
 Explain
 PLAN VECTORIZATION:
   enabled: true
@@ -584,9 +602,15 @@ _col0  _col1   _col2   _col3   _col4   _col5   _col6   
_col7   _col8   _col9   _col10  _col11
 PREHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from 
part_change_date_group_string_group_date_timestamp_n11
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part_change_date_group_string_group_date_timestamp_n11
+PREHOOK: Input: 
default@part_change_date_group_string_group_date_timestamp_n11@part=1
+ A masked pattern was here 
 POSTHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from 
part_change_date_group_string_group_date_timestamp_n11
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_change_date_group_string_group_date_timestamp_n11
+POSTHOOK: Input: 
default@part_change_date_group_string_group_date_timestamp_n11@part=1
+ A masked pattern was here 
 Explain
 PLAN VECTORIZATION:
   enabled: true
@@ -805,9 +829,15 @@ _col0  _col1   _col2   _col3   _col4   _col5   _col6   
_col7   _col8   _col9   _col10  _col11  _col12
 PREHOOK: query: explain vectorization detail
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b
 from part_change_numeric_group_string_group_multi_ints_string_group_n11
 PREHOOK: type: QUERY
+PREHOOK: Input: 
default@part_change_numeric_group_string_group_multi_ints_string_group_n11
+PREHOOK: Input: 
default@part_change_numeric_group_string_group_multi_ints_string_group_n11@part=1
+ A masked pattern was here 
 POSTHOOK: query: explain vectorization detail
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b
 from part_change_numeric_group_string_group_multi_ints_string_group_n11
 POSTHOOK: type: QUERY
+POSTHOOK: Input: 
default@part_change_numeric_group_string_group_multi_ints_string_group_n11
+POSTHOOK: Input: 
default@part_change_numeric_group_string_group_multi_ints_string_group_n11@part=1
+ A masked pattern was here 
 Explain
 PLAN VECTORIZATION:
   enabled: true
@@ -1016,9 +1046,15 @@ _col0_col1   _col2   _col3   _col4   _col5   _col6   
_col7   _col8   _col9   _col10  _col11  _col12
 PREHOOK: query: explain vectorization detail
 select 

[49/51] [partial] hive git commit: HIVE-18778: Needs to capture input/output entities in explain (Daniel Dai, reviewed by Thejas Nair)

2018-09-27 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientnegative/spark/spark_stage_max_tasks.q.out
--
diff --git 
a/ql/src/test/results/clientnegative/spark/spark_stage_max_tasks.q.out 
b/ql/src/test/results/clientnegative/spark/spark_stage_max_tasks.q.out
index a3a264d..97780ff 100644
--- a/ql/src/test/results/clientnegative/spark/spark_stage_max_tasks.q.out
+++ b/ql/src/test/results/clientnegative/spark/spark_stage_max_tasks.q.out
@@ -2,10 +2,14 @@ PREHOOK: query: EXPLAIN
 SELECT TRANSFORM(key) USING 'python sleep.py' AS k
   FROM (SELECT key FROM src1 GROUP BY key) a ORDER BY k
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+ A masked pattern was here 
 POSTHOOK: query: EXPLAIN
 SELECT TRANSFORM(key) USING 'python sleep.py' AS k
   FROM (SELECT key FROM src1 GROUP BY key) a ORDER BY k
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientnegative/udf_assert_true.q.out
--
diff --git a/ql/src/test/results/clientnegative/udf_assert_true.q.out 
b/ql/src/test/results/clientnegative/udf_assert_true.q.out
index 7fc50d6..81a385a 100644
--- a/ql/src/test/results/clientnegative/udf_assert_true.q.out
+++ b/ql/src/test/results/clientnegative/udf_assert_true.q.out
@@ -5,8 +5,12 @@ POSTHOOK: type: DESCFUNCTION
 ASSERT_TRUE(condition) - Throw an exception if 'condition' is not true.
 PREHOOK: query: EXPLAIN SELECT ASSERT_TRUE(x > 0) FROM src LATERAL VIEW 
EXPLODE(ARRAY(1, 2)) a AS x LIMIT 2
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
 POSTHOOK: query: EXPLAIN SELECT ASSERT_TRUE(x > 0) FROM src LATERAL VIEW 
EXPLODE(ARRAY(1, 2)) a AS x LIMIT 2
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -82,8 +86,12 @@ NULL
 NULL
 PREHOOK: query: EXPLAIN SELECT ASSERT_TRUE(x < 2) FROM src LATERAL VIEW 
EXPLODE(ARRAY(1, 2)) a AS x LIMIT 2
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
 POSTHOOK: query: EXPLAIN SELECT ASSERT_TRUE(x < 2) FROM src LATERAL VIEW 
EXPLODE(ARRAY(1, 2)) a AS x LIMIT 2
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientnegative/udf_assert_true2.q.out
--
diff --git a/ql/src/test/results/clientnegative/udf_assert_true2.q.out 
b/ql/src/test/results/clientnegative/udf_assert_true2.q.out
index f061a92..dca3f2d 100644
--- a/ql/src/test/results/clientnegative/udf_assert_true2.q.out
+++ b/ql/src/test/results/clientnegative/udf_assert_true2.q.out
@@ -1,7 +1,11 @@
 PREHOOK: query: EXPLAIN SELECT 1 + ASSERT_TRUE(x < 2) FROM src LATERAL VIEW 
EXPLODE(ARRAY(1, 2)) a AS x LIMIT 2
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
 POSTHOOK: query: EXPLAIN SELECT 1 + ASSERT_TRUE(x < 2) FROM src LATERAL VIEW 
EXPLODE(ARRAY(1, 2)) a AS x LIMIT 2
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/acid_mapjoin.q.out
--
diff --git a/ql/src/test/results/clientpositive/acid_mapjoin.q.out 
b/ql/src/test/results/clientpositive/acid_mapjoin.q.out
index 5569a03..c07b8ef 100644
--- a/ql/src/test/results/clientpositive/acid_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/acid_mapjoin.q.out
@@ -61,9 +61,15 @@ POSTHOOK: Output: default@acid1
 PREHOOK: query: explain
 select count(*) from acid1 join acid2 on acid1.key = acid2.key
 PREHOOK: type: QUERY
+PREHOOK: Input: default@acid1
+PREHOOK: Input: default@acid2
+ A masked pattern was here 
 POSTHOOK: query: explain
 select count(*) from acid1 join acid2 on acid1.key = acid2.key
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid1
+POSTHOOK: Input: default@acid2
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-5 is a root stage
   Stage-2 depends on stages: Stage-5

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/acid_nullscan.q.out
--
diff --git a/ql/src/test/results/clientpositive/acid_nullscan.q.out 
b/ql/src/test/results/clientpositive/acid_nullscan.q.out

[38/51] [partial] hive git commit: HIVE-18778: Needs to capture input/output entities in explain (Daniel Dai, reviewed by Thejas Nair)

2018-09-27 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
--
diff --git a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out 
b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
index 6b4489a..9fb25d2 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
@@ -38,10 +38,14 @@ PREHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1_n13
 SELECT key, count(1) FROM T1_n56 GROUP BY key
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n56
+PREHOOK: Output: default@outputtbl1_n13
 POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1_n13
 SELECT key, count(1) FROM T1_n56 GROUP BY key
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n56
+POSTHOOK: Output: default@outputtbl1_n13
 OPTIMIZED SQL: SELECT `key`, COUNT(*) AS `$f1`
 FROM `default`.`t1_n56`
 GROUP BY `key`
@@ -484,10 +488,14 @@ PREHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl2_n3
 SELECT key, val, count(1) FROM T1_n56 GROUP BY key, val
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n56
+PREHOOK: Output: default@outputtbl2_n3
 POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl2_n3
 SELECT key, val, count(1) FROM T1_n56 GROUP BY key, val
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n56
+POSTHOOK: Output: default@outputtbl2_n3
 OPTIMIZED SQL: SELECT `key`, `val`, COUNT(*) AS `$f2`
 FROM `default`.`t1_n56`
 GROUP BY `key`, `val`
@@ -865,10 +873,14 @@ PREHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1_n13
 SELECT key, count(1) FROM (SELECT key, val FROM T1_n56) subq1 GROUP BY key
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n56
+PREHOOK: Output: default@outputtbl1_n13
 POSTHOOK: query: EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl1_n13
 SELECT key, count(1) FROM (SELECT key, val FROM T1_n56) subq1 GROUP BY key
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n56
+POSTHOOK: Output: default@outputtbl1_n13
 OPTIMIZED SQL: SELECT `key`, COUNT(*) AS `$f1`
 FROM `default`.`t1_n56`
 GROUP BY `key`
@@ -1303,10 +1315,14 @@ PREHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1_n13
 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n56) subq1 GROUP BY 
k
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n56
+PREHOOK: Output: default@outputtbl1_n13
 POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl1_n13
 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n56) subq1 GROUP BY 
k
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n56
+POSTHOOK: Output: default@outputtbl1_n13
 OPTIMIZED SQL: SELECT `key`, COUNT(*) AS `$f1`
 FROM `default`.`t1_n56`
 GROUP BY `key`
@@ -1749,10 +1765,14 @@ PREHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl3_n1
 SELECT 1, key, count(1) FROM T1_n56 GROUP BY 1, key
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n56
+PREHOOK: Output: default@outputtbl3_n1
 POSTHOOK: query: EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl3_n1
 SELECT 1, key, count(1) FROM T1_n56 GROUP BY 1, key
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n56
+POSTHOOK: Output: default@outputtbl3_n1
 OPTIMIZED SQL: SELECT 1 AS `_o__c0`, `key`, COUNT(*) AS `_o__c2`
 FROM `default`.`t1_n56`
 GROUP BY `key`
@@ -2196,10 +2216,14 @@ PREHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl4_n1
 SELECT key, 1, val, count(1) FROM T1_n56 GROUP BY key, 1, val
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n56
+PREHOOK: Output: default@outputtbl4_n1
 POSTHOOK: query: EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl4_n1
 SELECT key, 1, val, count(1) FROM T1_n56 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n56
+POSTHOOK: Output: default@outputtbl4_n1
 OPTIMIZED SQL: SELECT `key`, 1 AS `_o__c1`, `val`, COUNT(*) AS `_o__c3`
 FROM `default`.`t1_n56`
 GROUP BY `key`, `val`
@@ -2578,10 +2602,14 @@ PREHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE outputTbl3_n1
 SELECT key, key + 1, count(1) FROM T1_n56 GROUP BY key, key + 1
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n56
+PREHOOK: Output: default@outputtbl3_n1
 POSTHOOK: query: EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl3_n1
 SELECT key, key + 1, count(1) FROM T1_n56 GROUP BY key, key + 1
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n56
+POSTHOOK: Output: default@outputtbl3_n1
 OPTIMIZED SQL: SELECT `key` AS `$f0`, CAST(`key` AS DOUBLE) + CAST(1 AS 
DOUBLE) AS `$f1`, COUNT(*) AS `$f2`
 FROM `default`.`t1_n56`
 GROUP BY `key`, CAST(`key` AS DOUBLE) + CAST(1 AS DOUBLE)
@@ -2960,12 +2988,16 @@ SELECT cast(key + key as string), sum(cnt) from
 (SELECT key, count(1) as cnt FROM T1_n56 GROUP BY key) subq1
 group by key + key
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n56
+PREHOOK: Output: default@outputtbl1_n13
 POSTHOOK: query: EXPLAIN EXTENDED 
 

[13/51] [partial] hive git commit: HIVE-18778: Needs to capture input/output entities in explain (Daniel Dai, reviewed by Thejas Nair)

2018-09-27 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/llap/semijoin6.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/semijoin6.q.out 
b/ql/src/test/results/clientpositive/llap/semijoin6.q.out
index 1c03f3f..048e714 100644
--- a/ql/src/test/results/clientpositive/llap/semijoin6.q.out
+++ b/ql/src/test/results/clientpositive/llap/semijoin6.q.out
@@ -37,9 +37,15 @@ POSTHOOK: Lineage: tx2_n0.b SCRIPT []
 PREHOOK: query: explain
 select * from tx1_n1 u left semi join tx2_n0 v on u.a=v.a
 PREHOOK: type: QUERY
+PREHOOK: Input: default@tx1_n1
+PREHOOK: Input: default@tx2_n0
+ A masked pattern was here 
 POSTHOOK: query: explain
 select * from tx1_n1 u left semi join tx2_n0 v on u.a=v.a
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tx1_n1
+POSTHOOK: Input: default@tx2_n0
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -137,9 +143,15 @@ POSTHOOK: Input: default@tx2_n0
 PREHOOK: query: explain
 select * from tx1_n1 u left semi join tx2_n0 v on u.b <=> v.b
 PREHOOK: type: QUERY
+PREHOOK: Input: default@tx1_n1
+PREHOOK: Input: default@tx2_n0
+ A masked pattern was here 
 POSTHOOK: query: explain
 select * from tx1_n1 u left semi join tx2_n0 v on u.b <=> v.b
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tx1_n1
+POSTHOOK: Input: default@tx2_n0
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -230,9 +242,15 @@ Warning: Shuffle Join MERGEJOIN[12][tables = [$hdt$_0, 
$hdt$_1]] in Stage 'Reduc
 PREHOOK: query: explain
 select * from tx1_n1 u left semi join tx2_n0 v on u.b <> v.b
 PREHOOK: type: QUERY
+PREHOOK: Input: default@tx1_n1
+PREHOOK: Input: default@tx2_n0
+ A masked pattern was here 
 POSTHOOK: query: explain
 select * from tx1_n1 u left semi join tx2_n0 v on u.b <> v.b
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tx1_n1
+POSTHOOK: Input: default@tx2_n0
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -329,9 +347,15 @@ NULL   400
 PREHOOK: query: explain
 select * from tx1_n1 u left semi join tx2_n0 v on u.a=v.a and u.b <> v.b
 PREHOOK: type: QUERY
+PREHOOK: Input: default@tx1_n1
+PREHOOK: Input: default@tx2_n0
+ A masked pattern was here 
 POSTHOOK: query: explain
 select * from tx1_n1 u left semi join tx2_n0 v on u.a=v.a and u.b <> v.b
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tx1_n1
+POSTHOOK: Input: default@tx2_n0
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -435,9 +459,15 @@ Warning: Shuffle Join MERGEJOIN[12][tables = [$hdt$_0, 
$hdt$_1]] in Stage 'Reduc
 PREHOOK: query: explain
 select * from tx1_n1 u left semi join tx2_n0 v on u.a=v.a or u.b <> v.b
 PREHOOK: type: QUERY
+PREHOOK: Input: default@tx1_n1
+PREHOOK: Input: default@tx2_n0
+ A masked pattern was here 
 POSTHOOK: query: explain
 select * from tx1_n1 u left semi join tx2_n0 v on u.a=v.a or u.b <> v.b
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tx1_n1
+POSTHOOK: Input: default@tx2_n0
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -534,9 +564,13 @@ NULL   400
 PREHOOK: query: explain
 select * from tx1_n1 u left semi join tx1_n1 v on u.a=v.a
 PREHOOK: type: QUERY
+PREHOOK: Input: default@tx1_n1
+ A masked pattern was here 
 POSTHOOK: query: explain
 select * from tx1_n1 u left semi join tx1_n1 v on u.a=v.a
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tx1_n1
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -637,6 +671,9 @@ on (u.a + v.b > 400)
   or (coalesce(u.a) + coalesce(v.b) > 1900))
   and u.a = v.a
 PREHOOK: type: QUERY
+PREHOOK: Input: default@tx1_n1
+PREHOOK: Input: default@tx2_n0
+ A masked pattern was here 
 POSTHOOK: query: explain
 select * from tx1_n1 u left semi join tx2_n0 v
 on (u.a + v.b > 400)
@@ -644,6 +681,9 @@ on (u.a + v.b > 400)
   or (coalesce(u.a) + coalesce(v.b) > 1900))
   and u.a = v.a
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tx1_n1
+POSTHOOK: Input: default@tx2_n0
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/llap/semijoin7.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/semijoin7.q.out 
b/ql/src/test/results/clientpositive/llap/semijoin7.q.out
index 5b4be8f..3143648 100644
--- a/ql/src/test/results/clientpositive/llap/semijoin7.q.out
+++ 

[48/51] [partial] hive git commit: HIVE-18778: Needs to capture input/output entities in explain (Daniel Dai, reviewed by Thejas Nair)

2018-09-27 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/annotate_stats_part.q.out
--
diff --git a/ql/src/test/results/clientpositive/annotate_stats_part.q.out 
b/ql/src/test/results/clientpositive/annotate_stats_part.q.out
index ec74ee2..6783102 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_part.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_part.q.out
@@ -42,8 +42,12 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@loc_orc_n4
 PREHOOK: query: explain select * from loc_orc_n4
 PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_n4
+ A masked pattern was here 
 POSTHOOK: query: explain select * from loc_orc_n4
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_n4
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -78,8 +82,16 @@ POSTHOOK: Lineage: loc_orc_n4 
PARTITION(year=__HIVE_DEFAULT_PARTITION__).state S
 POSTHOOK: Lineage: loc_orc_n4 PARTITION(year=__HIVE_DEFAULT_PARTITION__).zip 
SIMPLE [(loc_staging_n4)loc_staging_n4.FieldSchema(name:zip, type:bigint, 
comment:null), ]
 PREHOOK: query: explain select * from loc_orc_n4
 PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_n4
+PREHOOK: Input: default@loc_orc_n4@year=2001
+PREHOOK: Input: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+ A masked pattern was here 
 POSTHOOK: query: explain select * from loc_orc_n4
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_n4
+POSTHOOK: Input: default@loc_orc_n4@year=2001
+POSTHOOK: Input: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -109,8 +121,14 @@ POSTHOOK: Output: default@loc_orc_n4
 POSTHOOK: Output: default@loc_orc_n4@year=2001
 PREHOOK: query: explain select * from loc_orc_n4 where 
year='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_n4
+PREHOOK: Input: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+ A masked pattern was here 
 POSTHOOK: query: explain select * from loc_orc_n4 where 
year='__HIVE_DEFAULT_PARTITION__'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_n4
+POSTHOOK: Input: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -130,8 +148,16 @@ STAGE PLANS:
 
 PREHOOK: query: explain select * from loc_orc_n4
 PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_n4
+PREHOOK: Input: default@loc_orc_n4@year=2001
+PREHOOK: Input: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+ A masked pattern was here 
 POSTHOOK: query: explain select * from loc_orc_n4
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_n4
+POSTHOOK: Input: default@loc_orc_n4@year=2001
+POSTHOOK: Input: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -151,8 +177,14 @@ STAGE PLANS:
 
 PREHOOK: query: explain select * from loc_orc_n4 where year='2001'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_n4
+PREHOOK: Input: default@loc_orc_n4@year=2001
+ A masked pattern was here 
 POSTHOOK: query: explain select * from loc_orc_n4 where year='2001'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_n4
+POSTHOOK: Input: default@loc_orc_n4@year=2001
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -184,8 +216,14 @@ POSTHOOK: Output: default@loc_orc_n4@year=2001
 POSTHOOK: Output: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
 PREHOOK: query: explain select * from loc_orc_n4 where 
year='__HIVE_DEFAULT_PARTITION__'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_n4
+PREHOOK: Input: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+ A masked pattern was here 
 POSTHOOK: query: explain select * from loc_orc_n4 where 
year='__HIVE_DEFAULT_PARTITION__'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_n4
+POSTHOOK: Input: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -205,8 +243,16 @@ STAGE PLANS:
 
 PREHOOK: query: explain select * from loc_orc_n4
 PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_n4
+PREHOOK: Input: default@loc_orc_n4@year=2001
+PREHOOK: Input: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+ A masked pattern was here 
 POSTHOOK: query: explain select * from loc_orc_n4
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_n4
+POSTHOOK: Input: default@loc_orc_n4@year=2001
+POSTHOOK: Input: default@loc_orc_n4@year=__HIVE_DEFAULT_PARTITION__
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -226,8 +272,16 @@ STAGE PLANS:
 
 PREHOOK: query: explain select * from loc_orc_n4 where year='2001' or 

[23/51] [partial] hive git commit: HIVE-18778: Needs to capture input/output entities in explain (Daniel Dai, reviewed by Thejas Nair)

2018-09-27 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/llap/leftsemijoin.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/leftsemijoin.q.out 
b/ql/src/test/results/clientpositive/llap/leftsemijoin.q.out
index 363f2de..f059c75 100644
--- a/ql/src/test/results/clientpositive/llap/leftsemijoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/leftsemijoin.q.out
@@ -111,8 +111,12 @@ POSTHOOK: Output: default@things_n1
 Warning: Shuffle Join MERGEJOIN[24][tables = [$hdt$_1, $hdt$_2]] in Stage 
'Reducer 4' is a cross product
 PREHOOK: query: explain select part.p_type from part join (select p1.p_name 
from part p1, part p2 group by p1.p_name) pp ON pp.p_name = part.p_name
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+ A masked pattern was here 
 POSTHOOK: query: explain select part.p_type from part join (select p1.p_name 
from part p1, part p2 group by p1.p_name) pp ON pp.p_name = part.p_name
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -263,8 +267,12 @@ STANDARD PLATED TIN
 STANDARD POLISHED STEEL
 PREHOOK: query: explain select part.p_type from part left join (select 
p1.p_name from part p1, part p2 group by p1.p_name) pp ON pp.p_name = 
part.p_name
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+ A masked pattern was here 
 POSTHOOK: query: explain select part.p_type from part left join (select 
p1.p_name from part p1, part p2 group by p1.p_name) pp ON pp.p_name = 
part.p_name
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out 
b/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out
index 76a8bf6..4a9db25 100644
--- a/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out
+++ b/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out
@@ -4,12 +4,16 @@ from src src1 left outer join src src2
 on src1.key = src2.key
 limit 1
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
 POSTHOOK: query: explain
 select *
 from src src1 left outer join src src2
 on src1.key = src2.key
 limit 1
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -105,12 +109,16 @@ from src src1 left outer join src src2
 on src1.key = src2.key
 limit 1
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
 POSTHOOK: query: explain
 select *
 from src src1 left outer join src src2
 on src1.key = src2.key
 limit 1
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -206,12 +214,16 @@ from src src1 left outer join src src2
 on src1.key = src2.key
 limit 1
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
 POSTHOOK: query: explain
 select *
 from src src1 left outer join src src2
 on src1.key = src2.key
 limit 1
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -318,6 +330,8 @@ from src src1 right outer join (
 on src1.key = src2.key
 limit 1
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
 POSTHOOK: query: explain
 select *
 from src src1 right outer join (
@@ -327,6 +341,8 @@ from src src1 right outer join (
 on src1.key = src2.key
 limit 1
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -467,6 +483,8 @@ from src src1 right outer join (
 on src1.key = src2.key
 limit 1
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
 POSTHOOK: query: explain
 select *
 from src src1 right outer join (
@@ -476,6 +494,8 @@ from src src1 right outer join (
 on src1.key = src2.key
 limit 1
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -632,6 +652,8 @@ on src1.key = src2.key
 order by src2.key
 limit 1
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
 POSTHOOK: query: explain
 select *
 from src src1 right outer join (
@@ -642,6 

[36/51] [partial] hive git commit: HIVE-18778: Needs to capture input/output entities in explain (Daniel Dai, reviewed by Thejas Nair)

2018-09-27 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/join22.q.out
--
diff --git a/ql/src/test/results/clientpositive/join22.q.out 
b/ql/src/test/results/clientpositive/join22.q.out
index 3695655..7aa05eb 100644
--- a/ql/src/test/results/clientpositive/join22.q.out
+++ b/ql/src/test/results/clientpositive/join22.q.out
@@ -1,9 +1,13 @@
 PREHOOK: query: explain
 SELECT src5.src1_value FROM (SELECT src3.*, src4.value as src4_value, src4.key 
as src4_key FROM src src4 JOIN (SELECT src2.*, src1.key as src1_key, src1.value 
as src1_value FROM src src1 JOIN src src2 ON src1.key = src2.key) src3 ON 
src3.src1_key = src4.key) src5
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
 POSTHOOK: query: explain
 SELECT src5.src1_value FROM (SELECT src3.*, src4.value as src4_value, src4.key 
as src4_key FROM src src4 JOIN (SELECT src2.*, src1.key as src1_key, src1.value 
as src1_value FROM src src1 JOIN src src2 ON src1.key = src2.key) src3 ON 
src3.src1_key = src4.key) src5
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/join23.q.out
--
diff --git a/ql/src/test/results/clientpositive/join23.q.out 
b/ql/src/test/results/clientpositive/join23.q.out
index 64822b6..f89eac3 100644
--- a/ql/src/test/results/clientpositive/join23.q.out
+++ b/ql/src/test/results/clientpositive/join23.q.out
@@ -2,9 +2,13 @@ Warning: Shuffle Join JOIN[4][tables = [src1, src2]] in Stage 
'Stage-1:MAPRED' i
 PREHOOK: query: EXPLAIN
 SELECT *  FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 
SORT BY src1.key, src1.value, src2.key, src2.value
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
 POSTHOOK: query: EXPLAIN
 SELECT *  FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 
SORT BY src1.key, src1.value, src2.key, src2.value
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/join25.q.out
--
diff --git a/ql/src/test/results/clientpositive/join25.q.out 
b/ql/src/test/results/clientpositive/join25.q.out
index 94db4a4..3e097e5 100644
--- a/ql/src/test/results/clientpositive/join25.q.out
+++ b/ql/src/test/results/clientpositive/join25.q.out
@@ -11,11 +11,17 @@ INSERT OVERWRITE TABLE dest_j1_n18
 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key)
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@dest_j1_n18
 POSTHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE dest_j1_n18 
 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@dest_j1_n18
 STAGE DEPENDENCIES:
   Stage-6 is a root stage
   Stage-5 depends on stages: Stage-6

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/join26.q.out
--
diff --git a/ql/src/test/results/clientpositive/join26.q.out 
b/ql/src/test/results/clientpositive/join26.q.out
index 0014af5..206a238 100644
--- a/ql/src/test/results/clientpositive/join26.q.out
+++ b/ql/src/test/results/clientpositive/join26.q.out
@@ -12,12 +12,22 @@ SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key) 
 JOIN srcpart z ON (x.key = z.key and z.ds='2008-04-08' and z.hr=11)
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Output: default@dest_j1_n10
 POSTHOOK: query: EXPLAIN EXTENDED
 INSERT OVERWRITE TABLE dest_j1_n10
 SELECT /*+ MAPJOIN(x,y) */ x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key) 
 JOIN srcpart z ON (x.key = z.key and z.ds='2008-04-08' and z.hr=11)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@dest_j1_n10
 OPTIMIZED SQL: SELECT `t4`.`key`, `t0`.`value`, `t2`.`value` AS `value1`
 FROM (SELECT `key`, `value`, CAST('2008-04-08' AS STRING) AS `ds`, `hr`
 FROM `default`.`srcpart`

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/join27.q.out

[11/51] [partial] hive git commit: HIVE-18778: Needs to capture input/output entities in explain (Daniel Dai, reviewed by Thejas Nair)

2018-09-27 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out 
b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
index c84477e..4692cb4 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
@@ -7,6 +7,8 @@ where src.key not in
 where s1.key > '2'
   )
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
 POSTHOOK: query: explain
 select * 
 from src 
@@ -15,6 +17,8 @@ where src.key not in
 where s1.key > '2'
   )
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -296,6 +300,8 @@ where b.p_name not in
   where r <= 2 and b.p_mfgr = a.p_mfgr 
   )
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+ A masked pattern was here 
 POSTHOOK: query: explain
 select p_mfgr, b.p_name, p_size 
 from part b 
@@ -305,6 +311,8 @@ where b.p_name not in
   where r <= 2 and b.p_mfgr = a.p_mfgr 
   )
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -581,6 +589,8 @@ part where part.p_size not in
   where r <= 2
   )
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+ A masked pattern was here 
 POSTHOOK: query: explain
 select p_name, p_size 
 from 
@@ -590,6 +600,8 @@ part where part.p_size not in
   where r <= 2
   )
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -858,6 +870,8 @@ from part b where b.p_size not in
   where r <= 2 and b.p_mfgr = a.p_mfgr
   )
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+ A masked pattern was here 
 POSTHOOK: query: explain
 select p_mfgr, p_name, p_size
 from part b where b.p_size not in
@@ -866,6 +880,8 @@ from part b where b.p_size not in
   where r <= 2 and b.p_mfgr = a.p_mfgr
   )
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1416,10 +1432,18 @@ PREHOOK: query: explain
 select * 
 from T1_v where T1_v.key not in (select T2_v.key from T2_v)
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@t1_v
+PREHOOK: Input: default@t2_v
+ A masked pattern was here 
 POSTHOOK: query: explain
 select * 
 from T1_v where T1_v.key not in (select T2_v.key from T2_v)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@t1_v
+POSTHOOK: Input: default@t2_v
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1581,8 +1605,12 @@ POSTHOOK: Input: default@t2_v
  A masked pattern was here 
 PREHOOK: query: explain select * from part where p_brand <> 'Brand#14' AND 
p_size NOT IN (select (p_size*p_size) from part p where p.p_type = part.p_type 
) AND p_size <> 340
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+ A masked pattern was here 
 POSTHOOK: query: explain select * from part where p_brand <> 'Brand#14' AND 
p_size NOT IN (select (p_size*p_size) from part p where p.p_type = part.p_type 
) AND p_size <> 340
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1777,8 +1805,12 @@ POSTHOOK: Input: default@part
 Warning: Shuffle Join MERGEJOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
 PREHOOK: query: explain select * from part  where (p_size-1) NOT IN (select 
min(p_size) from part group by p_type) order by p_partkey
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+ A masked pattern was here 
 POSTHOOK: query: explain select * from part  where (p_size-1) NOT IN (select 
min(p_size) from part group by p_type) order by p_partkey
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -1985,8 +2017,12 @@ POSTHOOK: Input: default@part
 Warning: Shuffle Join MERGEJOIN[36][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
 PREHOOK: query: explain select * from part where (p_partkey*p_size) NOT IN 
(select min(p_partkey) from part group by p_type)
 PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+ A masked pattern was here 
 POSTHOOK: query: explain select * from part where (p_partkey*p_size) NOT IN 
(select min(p_partkey) from part group by p_type)
 

[30/51] [partial] hive git commit: HIVE-18778: Needs to capture input/output entities in explain (Daniel Dai, reviewed by Thejas Nair)

2018-09-27 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/llap/correlationoptimizer4.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/correlationoptimizer4.q.out 
b/ql/src/test/results/clientpositive/llap/correlationoptimizer4.q.out
index 027779d..b4369db 100644
--- a/ql/src/test/results/clientpositive/llap/correlationoptimizer4.q.out
+++ b/ql/src/test/results/clientpositive/llap/correlationoptimizer4.q.out
@@ -52,12 +52,20 @@ FROM (SELECT y.key AS key, count(1) AS cnt
   FROM T2_n86 x JOIN T1_n146 y ON (x.key = y.key) JOIN T3_n34 z ON (y.key 
= z.key)
   GROUP BY y.key) tmp
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n146
+PREHOOK: Input: default@t2_n86
+PREHOOK: Input: default@t3_n34
+ A masked pattern was here 
 POSTHOOK: query: EXPLAIN
 SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
 FROM (SELECT y.key AS key, count(1) AS cnt
   FROM T2_n86 x JOIN T1_n146 y ON (x.key = y.key) JOIN T3_n34 z ON (y.key 
= z.key)
   GROUP BY y.key) tmp
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n146
+POSTHOOK: Input: default@t2_n86
+POSTHOOK: Input: default@t3_n34
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -223,12 +231,20 @@ FROM (SELECT y.key AS key, count(1) AS cnt
   FROM T2_n86 x JOIN T1_n146 y ON (x.key = y.key) JOIN T3_n34 z ON (y.key 
= z.key)
   GROUP BY y.key) tmp
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n146
+PREHOOK: Input: default@t2_n86
+PREHOOK: Input: default@t3_n34
+ A masked pattern was here 
 POSTHOOK: query: EXPLAIN
 SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
 FROM (SELECT y.key AS key, count(1) AS cnt
   FROM T2_n86 x JOIN T1_n146 y ON (x.key = y.key) JOIN T3_n34 z ON (y.key 
= z.key)
   GROUP BY y.key) tmp
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n146
+POSTHOOK: Input: default@t2_n86
+POSTHOOK: Input: default@t3_n34
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -394,12 +410,20 @@ FROM (SELECT y.key AS key, count(1) AS cnt
   FROM T2_n86 x JOIN T1_n146 y ON (x.key = y.key) JOIN T3_n34 z ON (y.key 
= z.key)
   GROUP BY y.key) tmp
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n146
+PREHOOK: Input: default@t2_n86
+PREHOOK: Input: default@t3_n34
+ A masked pattern was here 
 POSTHOOK: query: EXPLAIN
 SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
 FROM (SELECT y.key AS key, count(1) AS cnt
   FROM T2_n86 x JOIN T1_n146 y ON (x.key = y.key) JOIN T3_n34 z ON (y.key 
= z.key)
   GROUP BY y.key) tmp
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n146
+POSTHOOK: Input: default@t2_n86
+POSTHOOK: Input: default@t3_n34
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -560,12 +584,20 @@ FROM (SELECT x.key AS key, count(1) AS cnt
   FROM T2_n86 x LEFT OUTER JOIN T1_n146 y ON (x.key = y.key) LEFT OUTER 
JOIN T3_n34 z ON (y.key = z.key)
   GROUP BY x.key) tmp
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n146
+PREHOOK: Input: default@t2_n86
+PREHOOK: Input: default@t3_n34
+ A masked pattern was here 
 POSTHOOK: query: EXPLAIN
 SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
 FROM (SELECT x.key AS key, count(1) AS cnt
   FROM T2_n86 x LEFT OUTER JOIN T1_n146 y ON (x.key = y.key) LEFT OUTER 
JOIN T3_n34 z ON (y.key = z.key)
   GROUP BY x.key) tmp
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n146
+POSTHOOK: Input: default@t2_n86
+POSTHOOK: Input: default@t3_n34
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -722,12 +754,20 @@ FROM (SELECT x.key AS key, count(1) AS cnt
   FROM T2_n86 x LEFT OUTER JOIN T1_n146 y ON (x.key = y.key) LEFT OUTER 
JOIN T3_n34 z ON (y.key = z.key)
   GROUP BY x.key) tmp
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n146
+PREHOOK: Input: default@t2_n86
+PREHOOK: Input: default@t3_n34
+ A masked pattern was here 
 POSTHOOK: query: EXPLAIN
 SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
 FROM (SELECT x.key AS key, count(1) AS cnt
   FROM T2_n86 x LEFT OUTER JOIN T1_n146 y ON (x.key = y.key) LEFT OUTER 
JOIN T3_n34 z ON (y.key = z.key)
   GROUP BY x.key) tmp
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n146
+POSTHOOK: Input: default@t2_n86
+POSTHOOK: Input: default@t3_n34
+ A masked pattern was here 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -884,12 +924,20 @@ FROM (SELECT y.key AS key, count(1) AS cnt
   FROM T2_n86 x LEFT OUTER JOIN T1_n146 y ON (x.key = y.key) LEFT OUTER 
JOIN T3_n34 z ON (y.key = z.key)
   GROUP BY y.key) tmp
 PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n146
+PREHOOK: Input: default@t2_n86
+PREHOOK: 

[37/51] [partial] hive git commit: HIVE-18778: Needs to capture input/output entities in explain (Daniel Dai, reviewed by Thejas Nair)

2018-09-27 Thread daijy
http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/input6.q.out
--
diff --git a/ql/src/test/results/clientpositive/input6.q.out 
b/ql/src/test/results/clientpositive/input6.q.out
index f6a5e43..5fd0859 100644
--- a/ql/src/test/results/clientpositive/input6.q.out
+++ b/ql/src/test/results/clientpositive/input6.q.out
@@ -10,10 +10,14 @@ PREHOOK: query: EXPLAIN
 FROM src1
 INSERT OVERWRITE TABLE dest1_n35 SELECT src1.key, src1.value WHERE src1.key is 
null
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@dest1_n35
 POSTHOOK: query: EXPLAIN
 FROM src1
 INSERT OVERWRITE TABLE dest1_n35 SELECT src1.key, src1.value WHERE src1.key is 
null
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@dest1_n35
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/input7.q.out
--
diff --git a/ql/src/test/results/clientpositive/input7.q.out 
b/ql/src/test/results/clientpositive/input7.q.out
index 26add5b..8b9898a 100644
--- a/ql/src/test/results/clientpositive/input7.q.out
+++ b/ql/src/test/results/clientpositive/input7.q.out
@@ -10,10 +10,14 @@ PREHOOK: query: EXPLAIN
 FROM src1
 INSERT OVERWRITE TABLE dest1_n167 SELECT NULL, src1.key
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@dest1_n167
 POSTHOOK: query: EXPLAIN
 FROM src1
 INSERT OVERWRITE TABLE dest1_n167 SELECT NULL, src1.key
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@dest1_n167
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/input8.q.out
--
diff --git a/ql/src/test/results/clientpositive/input8.q.out 
b/ql/src/test/results/clientpositive/input8.q.out
index da3e42d..3c533a8 100644
--- a/ql/src/test/results/clientpositive/input8.q.out
+++ b/ql/src/test/results/clientpositive/input8.q.out
@@ -10,10 +10,14 @@ PREHOOK: query: EXPLAIN
 FROM src1 
 INSERT OVERWRITE TABLE dest1_n28 SELECT 4 + NULL, src1.key - NULL, NULL + NULL
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@dest1_n28
 POSTHOOK: query: EXPLAIN
 FROM src1 
 INSERT OVERWRITE TABLE dest1_n28 SELECT 4 + NULL, src1.key - NULL, NULL + NULL
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@dest1_n28
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/input9.q.out
--
diff --git a/ql/src/test/results/clientpositive/input9.q.out 
b/ql/src/test/results/clientpositive/input9.q.out
index 1424957..3085589 100644
--- a/ql/src/test/results/clientpositive/input9.q.out
+++ b/ql/src/test/results/clientpositive/input9.q.out
@@ -10,10 +10,14 @@ PREHOOK: query: EXPLAIN
 FROM src1
 INSERT OVERWRITE TABLE dest1_n159 SELECT NULL, src1.key where NULL = NULL
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@dest1_n159
 POSTHOOK: query: EXPLAIN
 FROM src1
 INSERT OVERWRITE TABLE dest1_n159 SELECT NULL, src1.key where NULL = NULL
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@dest1_n159
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5

http://git-wip-us.apache.org/repos/asf/hive/blob/b356aae0/ql/src/test/results/clientpositive/input_columnarserde.q.out
--
diff --git a/ql/src/test/results/clientpositive/input_columnarserde.q.out 
b/ql/src/test/results/clientpositive/input_columnarserde.q.out
index ae51d74..3dc138a 100644
--- a/ql/src/test/results/clientpositive/input_columnarserde.q.out
+++ b/ql/src/test/results/clientpositive/input_columnarserde.q.out
@@ -20,10 +20,14 @@ PREHOOK: query: EXPLAIN
 FROM src_thrift
 INSERT OVERWRITE TABLE input_columnarserde SELECT src_thrift.lint, 
src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, 
src_thrift.astring DISTRIBUTE BY 1
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src_thrift
+PREHOOK: Output: default@input_columnarserde
 POSTHOOK: query: EXPLAIN
 FROM src_thrift
 INSERT OVERWRITE TABLE input_columnarserde SELECT src_thrift.lint, 
src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, 
src_thrift.astring DISTRIBUTE BY 1
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_thrift

hive git commit: HIVE-21124: HPL/SQL does not support the CREATE TABLE LIKE statement (Baoning He, reviewed by Daniel Dai)

2019-01-16 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master 4d5d80609 -> 8e7c3b340


HIVE-21124: HPL/SQL does not support the CREATE TABLE LIKE statement (Baoning 
He, reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8e7c3b34
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8e7c3b34
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8e7c3b34

Branch: refs/heads/master
Commit: 8e7c3b340f36a3b76453338b04b8cda360eeaa70
Parents: 4d5d806
Author: Daniel Dai 
Authored: Wed Jan 16 22:21:57 2019 -0800
Committer: Daniel Dai 
Committed: Wed Jan 16 22:21:57 2019 -0800

--
 hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4 | 2 +-
 hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java   | 3 +++
 .../src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java | 5 +
 hplsql/src/test/queries/offline/create_table.sql| 1 +
 hplsql/src/test/results/offline/create_table.out.txt| 2 ++
 5 files changed, 12 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8e7c3b34/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
--
diff --git a/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4 
b/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
index 186b617..77c2e2c 100644
--- a/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
+++ b/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
@@ -227,7 +227,7 @@ create_local_temp_table_stmt :
  ;
  
 create_table_definition :
-  (T_AS? T_OPEN_P select_stmt T_CLOSE_P | T_AS? select_stmt | T_OPEN_P 
create_table_columns T_CLOSE_P) create_table_options?
+  (T_AS? T_OPEN_P select_stmt T_CLOSE_P | T_AS? select_stmt | T_OPEN_P 
create_table_columns T_CLOSE_P | T_LIKE table_name) create_table_options?
  ;
  
 create_table_columns : 

http://git-wip-us.apache.org/repos/asf/hive/blob/8e7c3b34/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java
--
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java 
b/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java
index 0094e82..eabb9fa 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java
@@ -172,6 +172,9 @@ public class Stmt {
   }
   exec.append(sql, ctx.T_CLOSE_P().getText(), last, 
ctx.T_CLOSE_P().getSymbol());
 }
+else if (ctx.T_LIKE() != null) {
+  sql.append(" ").append(ctx.T_LIKE().getText()).append(" 
").append(evalPop(ctx.table_name()));
+}
 // CREATE TABLE AS SELECT statement
 else {
   exec.append(sql, evalPop(ctx.select_stmt()).toString(), last, 
ctx.select_stmt().getStart());

http://git-wip-us.apache.org/repos/asf/hive/blob/8e7c3b34/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java
--
diff --git a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java 
b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java
index c908191..b48c8c5 100644
--- a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java
+++ b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java
@@ -34,6 +34,11 @@ public class TestHplsqlOffline {
   private final ByteArrayOutputStream out = new ByteArrayOutputStream();
 
   @Test
+  public void testCreateTable() throws Exception {
+run("create_table");
+  }
+
+  @Test
   public void testCreateTableDb2() throws Exception {
 run("create_table_db2");
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/8e7c3b34/hplsql/src/test/queries/offline/create_table.sql
--
diff --git a/hplsql/src/test/queries/offline/create_table.sql 
b/hplsql/src/test/queries/offline/create_table.sql
new file mode 100644
index 000..9fde50f
--- /dev/null
+++ b/hplsql/src/test/queries/offline/create_table.sql
@@ -0,0 +1 @@
+CREATE TABLE tbl LIKE tbl2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/8e7c3b34/hplsql/src/test/results/offline/create_table.out.txt
--
diff --git a/hplsql/src/test/results/offline/create_table.out.txt 
b/hplsql/src/test/results/offline/create_table.out.txt
new file mode 100644
index 000..fac30ed
--- /dev/null
+++ b/hplsql/src/test/results/offline/create_table.out.txt
@@ -0,0 +1,2 @@
+Ln:1 CREATE TABLE
+Ln:1 CREATE TABLE tbl LIKE tbl2
\ No newline at end of file



hive git commit: HIVE-20978: "hive.jdbc.*" should add to sqlStdAuthSafeVarNameRegexes (Daniel Dai, reviewed by Jesus Camacho Rodriguez)

2018-11-28 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master da34c3c25 -> 073c7abb1


HIVE-20978: "hive.jdbc.*" should add to sqlStdAuthSafeVarNameRegexes (Daniel 
Dai, reviewed by Jesus Camacho Rodriguez)

Signed-off-by: Jesus Camacho Rodriguez 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/073c7abb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/073c7abb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/073c7abb

Branch: refs/heads/master
Commit: 073c7abb196b475d23e4b09305ecb6821af42d4e
Parents: da34c3c
Author: Daniel Dai 
Authored: Wed Nov 28 23:46:52 2018 -0800
Committer: Daniel Dai 
Committed: Wed Nov 28 23:47:08 2018 -0800

--
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/073c7abb/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 4919a4e..c245671 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -5576,6 +5576,7 @@ public class HiveConf extends Configuration {
 "hive\\.index\\..*",
 "hive\\.index\\..*",
 "hive\\.intermediate\\..*",
+"hive\\.jdbc\\..*",
 "hive\\.join\\..*",
 "hive\\.limit\\..*",
 "hive\\.log\\..*",

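The entries above belong to sqlStdAuthSafeVarNameRegexes, the whitelist of configuration name patterns a user may still set at runtime when SQL standard authorization is enabled; the patch simply adds the hive.jdbc namespace to that list. A minimal sketch of how one whitelist entry matches, assuming a hypothetical property name (not taken from this commit):

  import java.util.regex.Pattern;

  public class SafeVarNameCheck {
    public static void main(String[] args) {
      // The Java literal "hive\\.jdbc\\..*" from HiveConf compiles to the regex hive\.jdbc\..*
      Pattern safe = Pattern.compile("hive\\.jdbc\\..*");
      System.out.println(safe.matcher("hive.jdbc.fetch.size").matches());  // true: allowed by the new entry
      System.out.println(safe.matcher("hive.exec.scratchdir").matches());  // false: not covered by this entry
    }
  }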


hive git commit: HIVE-21013: JdbcStorageHandler fail to find partition column in Oracle (Daniel Dai, reviewed by Jesus Camacho Rodriguez)

2018-12-07 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master 4f3377c6b -> aae392ff8


HIVE-21013: JdbcStorageHandler fail to find partition column in Oracle (Daniel 
Dai, reviewed by Jesus Camacho Rodriguez)

Signed-off-by: Jesus Camacho Rodriguez 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/aae392ff
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/aae392ff
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/aae392ff

Branch: refs/heads/master
Commit: aae392ff85ba1035d8fa8d4cf879b615d3faf88f
Parents: 4f3377c
Author: Daniel Dai 
Authored: Fri Dec 7 11:22:50 2018 -0800
Committer: Daniel Dai 
Committed: Fri Dec 7 11:22:59 2018 -0800

--
 .../apache/hive/storage/jdbc/dao/GenericJdbcDatabaseAccessor.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/aae392ff/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/GenericJdbcDatabaseAccessor.java
--
diff --git 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/GenericJdbcDatabaseAccessor.java
 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/GenericJdbcDatabaseAccessor.java
index a971183..79c16df 100644
--- 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/GenericJdbcDatabaseAccessor.java
+++ 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/GenericJdbcDatabaseAccessor.java
@@ -237,7 +237,7 @@ public class GenericJdbcDatabaseAccessor implements 
DatabaseAccessor {
   Matcher m = fromPattern.matcher(sql);
   Preconditions.checkArgument(m.matches());
   String queryBeforeFrom = m.group(1);
-  String queryAfterFrom = m.group(2);
+  String queryAfterFrom = " " + m.group(2) + " ";
 
   Character[] possibleDelimits = new Character[] {'`', '\"', ' '};
   for (Character possibleDelimit : possibleDelimits) {
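
The hunk above only pads the text after FROM with a leading and trailing space, which is enough because the accessor then searches for the partition column wrapped in one of the delimiters listed ('`', '"', or ' '). A simplified, hedged sketch of that search (method and variable names are illustrative, not the exact Hive code) shows why a column at the very start or end of the clause was previously missed:

  public class PartitionColumnSearchSketch {
    static int findColumn(String queryAfterFrom, String partitionColumn) {
      Character[] possibleDelimits = new Character[] {'`', '\"', ' '};
      for (Character delimit : possibleDelimits) {
        // The column only counts as found when the same delimiter appears on both sides.
        int idx = queryAfterFrom.indexOf(delimit + partitionColumn + delimit);
        if (idx >= 0) {
          return idx;
        }
      }
      return -1;
    }

    public static void main(String[] args) {
      String afterFrom = "TAB1 ORDER BY IKEY";                        // Oracle-style query tail
      System.out.println(findColumn(afterFrom, "IKEY"));              // -1: no delimiter after the last column
      System.out.println(findColumn(" " + afterFrom + " ", "IKEY"));  // found once the clause is padded
    }
  }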



hive git commit: HIVE-20944: Not validate stats during query compilation (Daniel Dai, reviewed by Ashutosh Chauhan)

2018-11-20 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master c392bccdb -> 2b882d5b1


HIVE-20944: Not validate stats during query compilation (Daniel Dai, reviewed 
by Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2b882d5b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2b882d5b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2b882d5b

Branch: refs/heads/master
Commit: 2b882d5b1cb40082dd13eb88147e7ffc97c7b314
Parents: c392bcc
Author: Daniel Dai 
Authored: Tue Nov 20 21:16:26 2018 -0800
Committer: Daniel Dai 
Committed: Tue Nov 20 21:16:43 2018 -0800

--
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |   4 +-
 .../results/clientpositive/acid_stats4.q.out|  12 +-
 .../results/clientpositive/acid_stats5.q.out|  18 +--
 .../llap/dynpart_sort_optimization_acid.q.out   |  60 -
 .../clientpositive/llap/tez_nway_join.q.out | 122 +--
 .../clientpositive/llap/vector_if_expr_2.q.out  |  10 +-
 .../clientpositive/llap/vector_like_2.q.out |  10 +-
 .../clientpositive/llap/vector_udf2.q.out   |  16 +--
 .../llap/vectorized_mapjoin3.q.out  |  72 +--
 .../clientpositive/stats_partial_size.q.out |  12 +-
 10 files changed, 168 insertions(+), 168 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2b882d5b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index b7adc48..bb4196f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -385,7 +385,7 @@ public class StatsUtils {
 // skip the step to connect to the metastore.
 if (neededColsToRetrieve.size() > 0 && partNames.size() > 0) {
   aggrStats = Hive.get().getAggrColStatsFor(table.getDbName(), 
table.getTableName(),
-  neededColsToRetrieve, partNames, true);
+  neededColsToRetrieve, partNames, false);
 }
 
 boolean statsRetrieved = aggrStats != null &&
@@ -1025,7 +1025,7 @@ public class StatsUtils {
 List<ColStatistics> stats = null;
 try {
   List<ColumnStatisticsObj> colStat = Hive.get().getTableColumnStatistics(
-  dbName, tabName, colStatsToRetrieve, true);
+  dbName, tabName, colStatsToRetrieve, false);
   stats = convertColStats(colStat, tabName);
 } catch (HiveException e) {
   LOG.error("Failed to retrieve table statistics: ", e);

http://git-wip-us.apache.org/repos/asf/hive/blob/2b882d5b/ql/src/test/results/clientpositive/acid_stats4.q.out
--
diff --git a/ql/src/test/results/clientpositive/acid_stats4.q.out 
b/ql/src/test/results/clientpositive/acid_stats4.q.out
index d317ed0..b36aa4a 100644
--- a/ql/src/test/results/clientpositive/acid_stats4.q.out
+++ b/ql/src/test/results/clientpositive/acid_stats4.q.out
@@ -265,19 +265,19 @@ STAGE PLANS:
   Map Operator Tree:
   TableScan
 alias: stats_nonpart2
-Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column 
stats: NONE
+Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column 
stats: COMPLETE
 Select Operator
   expressions: key2 (type: int)
   outputColumnNames: key2
-  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE 
Column stats: NONE
+  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE 
Column stats: COMPLETE
   Group By Operator
 aggregations: count(key2)
 mode: hash
 outputColumnNames: _col0
-Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE 
Column stats: NONE
+Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
 Reduce Output Operator
   sort order: 
-  Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE 
Column stats: NONE
+  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
   value expressions: _col0 (type: bigint)
   Execution mode: vectorized
   Reduce Operator Tree:
@@ -285,10 +285,10 @@ STAGE PLANS:
   aggregations: count(VALUE._col0)
   mode: mergepartial
   outputColumnNames: _col0
-  Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column 
stats: NONE
+  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: COMPLETE
   File Output Operator

hive git commit: HIVE-20937: Postgres jdbc query fail with "LIMIT must not be negative" (Daniel Dai, reviewed by Thejas Nair)

2018-11-19 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master 5553c59e3 -> 1fe471d31


HIVE-20937: Postgres jdbc query fail with "LIMIT must not be negative" (Daniel 
Dai, reviewed by Thejas Nair)

Signed-off-by: Thejas M Nair 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1fe471d3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1fe471d3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1fe471d3

Branch: refs/heads/master
Commit: 1fe471d31867fdffd8fd62ac5180a9377bef13e5
Parents: 5553c59
Author: Daniel Dai 
Authored: Mon Nov 19 15:04:34 2018 -0800
Committer: Daniel Dai 
Committed: Mon Nov 19 15:04:42 2018 -0800

--
 .../apache/hive/storage/jdbc/dao/MsSqlDatabaseAccessor.java   | 7 ++-
 .../apache/hive/storage/jdbc/dao/OracleDatabaseAccessor.java  | 6 ++
 .../hive/storage/jdbc/dao/PostgresDatabaseAccessor.java   | 6 ++
 3 files changed, 18 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1fe471d3/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/MsSqlDatabaseAccessor.java
--
diff --git 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/MsSqlDatabaseAccessor.java
 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/MsSqlDatabaseAccessor.java
index 5c5455f..d6e117e 100644
--- 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/MsSqlDatabaseAccessor.java
+++ 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/MsSqlDatabaseAccessor.java
@@ -28,6 +28,9 @@ public class MsSqlDatabaseAccessor extends 
GenericJdbcDatabaseAccessor {
 if (offset == 0) {
   return addLimitToQuery(sql, limit);
 } else {
+  if (limit == -1) {
+return sql;
+  }
   // Order by is not necessary, but MS SQL require it to use FETCH
   return sql + " ORDER BY 1 OFFSET " + offset + " ROWS FETCH NEXT " + 
limit + " ROWS ONLY";
 }
@@ -35,7 +38,9 @@ public class MsSqlDatabaseAccessor extends 
GenericJdbcDatabaseAccessor {
 
   @Override
   protected String addLimitToQuery(String sql, int limit) {
+if (limit == -1) {
+  return sql;
+}
 return sql + " {LIMIT " + limit + "}";
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe471d3/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/OracleDatabaseAccessor.java
--
diff --git 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/OracleDatabaseAccessor.java
 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/OracleDatabaseAccessor.java
index 39c4cda..4a993fb 100644
--- 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/OracleDatabaseAccessor.java
+++ 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/OracleDatabaseAccessor.java
@@ -31,6 +31,9 @@ public class OracleDatabaseAccessor extends 
GenericJdbcDatabaseAccessor {
 if (offset == 0) {
   return addLimitToQuery(sql, limit);
 } else {
+  if (limit == -1) {
+return sql;
+  }
   // A simple ROWNUM > offset and ROWNUM <= (offset + limit) won't work, 
it will return nothing
   return "SELECT * FROM (SELECT t.*, ROWNUM AS " + ROW_NUM_COLUMN_NAME + " 
FROM (" + sql + ") t) WHERE "
   +  ROW_NUM_COLUMN_NAME + " >" + offset + " AND " + 
ROW_NUM_COLUMN_NAME + " <=" + (offset + limit);
@@ -40,6 +43,9 @@ public class OracleDatabaseAccessor extends 
GenericJdbcDatabaseAccessor {
 
   @Override
   protected String addLimitToQuery(String sql, int limit) {
+if (limit == -1) {
+  return sql;
+}
 return "SELECT * FROM (" + sql + ") WHERE ROWNUM <= " + limit;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe471d3/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/PostgresDatabaseAccessor.java
--
diff --git 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/PostgresDatabaseAccessor.java
 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/PostgresDatabaseAccessor.java
index c0280fd..866b82f 100644
--- 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/PostgresDatabaseAccessor.java
+++ 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/PostgresDatabaseAccessor.java
@@ -28,12 +28,18 @@ public class PostgresDatabaseAccessor extends 
GenericJdbcDatabaseAccessor {
 if (offset == 0) {
   return addLimitToQuery(sql, limit);
 } else {
+  if (limit == -1) {
+return sql;
+  }
   return sql + " LIMIT " + limit + " OFFSET " + offset;
 }
   }
 
   @Override
   protected String addLimitToQuery(String sql, int limit) {
+if (limit == -1) {
+  return sql;
+}
 return sql + " LIMIT 
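
The Postgres hunk is cut off above, but its shape matches the MS SQL and Oracle accessors: a limit of -1 signals that no LIMIT was pushed down, so the query has to be returned untouched instead of producing "LIMIT -1", which Postgres rejects with "LIMIT must not be negative". A hedged, simplified sketch of the behavior after the fix (the real accessors also handle OFFSET):

  public class LimitSketch {
    static String addLimitToQuery(String sql, int limit) {
      if (limit == -1) {
        return sql;                        // no limit requested: leave the SQL alone
      }
      return sql + " LIMIT " + limit;
    }

    public static void main(String[] args) {
      System.out.println(addLimitToQuery("SELECT * FROM t", -1));  // SELECT * FROM t
      System.out.println(addLimitToQuery("SELECT * FROM t", 10));  // SELECT * FROM t LIMIT 10
    }
  }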

hive git commit: HIVE-21113: For HPL/SQL that contains boolean expression with NOT, incorrect SQL may be generated (Baoning He, reviewed by Daniel Dai)

2019-01-11 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master 28db173b9 -> a3aa074d7


HIVE-21113: For HPL/SQL that contains boolean expression with NOT, incorrect 
SQL may be generated (Baoning He, reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a3aa074d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a3aa074d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a3aa074d

Branch: refs/heads/master
Commit: a3aa074d7b27b3c6037ee1c0f6afb03044983c92
Parents: 28db173
Author: Daniel Dai 
Authored: Fri Jan 11 16:36:01 2019 -0800
Committer: Daniel Dai 
Committed: Fri Jan 11 16:36:01 2019 -0800

--
 hplsql/src/main/java/org/apache/hive/hplsql/Expression.java | 3 +++
 hplsql/src/test/queries/offline/select.sql  | 6 +-
 hplsql/src/test/results/offline/select.out.txt  | 7 ++-
 3 files changed, 14 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a3aa074d/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java
--
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java 
b/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java
index 188b173..1002581 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java
@@ -148,6 +148,9 @@ public class Expression {
   public void execBoolSql(HplsqlParser.Bool_exprContext ctx) {
 StringBuilder sql = new StringBuilder();
 if (ctx.T_OPEN_P() != null) {
+  if (ctx.T_NOT() != null) {
+sql.append(ctx.T_NOT().getText() + " ");
+  }
   sql.append("(");
   sql.append(evalPop(ctx.bool_expr(0)).toString());
   sql.append(")");

http://git-wip-us.apache.org/repos/asf/hive/blob/a3aa074d/hplsql/src/test/queries/offline/select.sql
--
diff --git a/hplsql/src/test/queries/offline/select.sql 
b/hplsql/src/test/queries/offline/select.sql
index 0b6912e..cd5a233 100644
--- a/hplsql/src/test/queries/offline/select.sql
+++ b/hplsql/src/test/queries/offline/select.sql
@@ -39,4 +39,8 @@ FROM
 
   LEFT OUTER JOIN TEST.LOCATION LOC
ON DLTA_POC.SE_KEY_POC = LOC.LOC_ID
-   AND LOC.LOCATION_END_DT = DATE '-12-31' ;
\ No newline at end of file
+   AND LOC.LOCATION_END_DT = DATE '-12-31' ;
+
+SELECT *
+  FROM a
+  WHERE NOT (1 = 2)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/a3aa074d/hplsql/src/test/results/offline/select.out.txt
--
diff --git a/hplsql/src/test/results/offline/select.out.txt 
b/hplsql/src/test/results/offline/select.out.txt
index 529f0b5..849a1ea 100644
--- a/hplsql/src/test/results/offline/select.out.txt
+++ b/hplsql/src/test/results/offline/select.out.txt
@@ -31,4 +31,9 @@ FROM DLTA_POC LEFT OUTER JOIN TEST3_DB.TET ORG ON 
DLTA_POC.YS_NO = ORG.EM_CODE_A
AND DLTA_POC.AREA_NO = ORG.AREA_CODE_2
AND DLTA_POC.GNT_POC = ORG.GEN_CD LEFT OUTER JOIN TEST.LOCATION LOC ON 
DLTA_POC.SE_KEY_POC = LOC.LOC_ID
AND LOC.LOCATION_END_DT = DATE '-12-31'
-Ln:31 Not executed - offline mode set
\ No newline at end of file
+Ln:31 Not executed - offline mode set
+Ln:44 SELECT
+Ln:44 SELECT *
+  FROM a
+  WHERE NOT (1 = 2)
+Ln:44 Not executed - offline mode set
\ No newline at end of file



hive git commit: HIVE-21082: In HPL/SQL, declare statement does not support variable of type character (Baoning He, reviewed by Daniel Dai)

2019-01-09 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master e92df8dd6 -> 4d03e31d3


HIVE-21082: In HPL/SQL, declare statement does not support variable of type 
character (Baoning He, reviewed by Daniel Dai)

Signed-off-by: Daniel Dai 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4d03e31d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4d03e31d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4d03e31d

Branch: refs/heads/master
Commit: 4d03e31d353121beccb6031e4a0d19d9d865e404
Parents: e92df8d
Author: Daniel Dai 
Authored: Wed Jan 9 10:35:42 2019 -0800
Committer: Daniel Dai 
Committed: Wed Jan 9 10:36:10 2019 -0800

--
 hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4   | 1 +
 hplsql/src/main/java/org/apache/hive/hplsql/Var.java  | 2 +-
 .../src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java | 5 +
 hplsql/src/test/queries/local/declare4.sql| 7 +++
 hplsql/src/test/results/local/declare4.out.txt| 6 ++
 5 files changed, 20 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4d03e31d/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
--
diff --git a/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4 
b/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
index 9dd898e..186b617 100644
--- a/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
+++ b/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
@@ -363,6 +363,7 @@ alter_table_add_constraint_item :
  
 dtype :  // Data types
T_CHAR
+ | T_CHARACTER
  | T_BIGINT
  | T_BINARY_DOUBLE
  | T_BINARY_FLOAT

http://git-wip-us.apache.org/repos/asf/hive/blob/4d03e31d/hplsql/src/main/java/org/apache/hive/hplsql/Var.java
--
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Var.java 
b/hplsql/src/main/java/org/apache/hive/hplsql/Var.java
index 2421c60..a117cb6 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Var.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Var.java
@@ -327,7 +327,7 @@ public class Var {
   return Type.BIGINT;
 }
 else if (type.equalsIgnoreCase("CHAR") || type.equalsIgnoreCase("VARCHAR") 
|| type.equalsIgnoreCase("VARCHAR2") || 
- type.equalsIgnoreCase("STRING") || type.equalsIgnoreCase("XML")) {
+ type.equalsIgnoreCase("STRING") || type.equalsIgnoreCase("XML") 
|| type.equalsIgnoreCase("CHARACTER")) {
   return Type.STRING;
 }
 else if (type.equalsIgnoreCase("DEC") || type.equalsIgnoreCase("DECIMAL") 
|| type.equalsIgnoreCase("NUMERIC") ||

http://git-wip-us.apache.org/repos/asf/hive/blob/4d03e31d/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
--
diff --git a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java 
b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
index cac2f8b..cd0e938 100644
--- a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
+++ b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
@@ -182,6 +182,11 @@ public class TestHplsqlLocal {
   public void testDeclare3() throws Exception {
 run("declare3");
   }
+
+  @Test
+  public void testDeclare4() throws Exception {
+run("declare4");
+  }
   
   @Test
   public void testDeclareCondition() throws Exception {

http://git-wip-us.apache.org/repos/asf/hive/blob/4d03e31d/hplsql/src/test/queries/local/declare4.sql
--
diff --git a/hplsql/src/test/queries/local/declare4.sql 
b/hplsql/src/test/queries/local/declare4.sql
new file mode 100644
index 000..8928292
--- /dev/null
+++ b/hplsql/src/test/queries/local/declare4.sql
@@ -0,0 +1,7 @@
+declare
+  code character(10) not null := 'a';
+  code1 character not null := 'ab';
+begin
+  print code;
+  print code1;
+end;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/4d03e31d/hplsql/src/test/results/local/declare4.out.txt
--
diff --git a/hplsql/src/test/results/local/declare4.out.txt 
b/hplsql/src/test/results/local/declare4.out.txt
new file mode 100644
index 000..5f9dcc2
--- /dev/null
+++ b/hplsql/src/test/results/local/declare4.out.txt
@@ -0,0 +1,6 @@
+Ln:2 DECLARE code character = 'a'
+Ln:3 DECLARE code1 character = 'ab'
+Ln:5 PRINT
+a
+Ln:6 PRINT
+ab
\ No newline at end of file



hive git commit: HIVE-20355: Clean up parameter of HiveConnection.setSchema (Daniel Dai, reviewed by Sankar Hariappan)

2018-09-15 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master 01083a61c -> 2c8e67942


HIVE-20355: Clean up parameter of HiveConnection.setSchema (Daniel Dai, 
reviewed by Sankar Hariappan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2c8e6794
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2c8e6794
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2c8e6794

Branch: refs/heads/master
Commit: 2c8e679429352e6f550e3d9bdfa41860095d4e06
Parents: 01083a6
Author: Daniel Dai 
Authored: Sat Sep 15 11:51:50 2018 -0700
Committer: Daniel Dai 
Committed: Sat Sep 15 11:51:50 2018 -0700

--
 jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2c8e6794/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java 
b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index 458158e..70cc34d 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -1492,6 +1492,9 @@ public class HiveConnection implements 
java.sql.Connection {
 if (schema == null || schema.isEmpty()) {
   throw new SQLException("Schema name is null or empty");
 }
+if (schema.contains(";")) {
+  throw new SQLException("invalid schema name");
+}
 Statement stmt = createStatement();
 stmt.execute("use " + schema);
 stmt.close();
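
Because setSchema builds and runs its statement by string concatenation ("use " + schema), a semicolon in the argument could smuggle a second statement into the session; the new check closes that hole. A hedged usage illustration (the JDBC URL and table name below are hypothetical):

  import java.sql.Connection;
  import java.sql.DriverManager;
  import java.sql.SQLException;

  public class SetSchemaExample {
    public static void main(String[] args) throws SQLException {
      try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default")) {
        conn.setSchema("sales");                        // fine: runs "use sales"
        conn.setSchema("sales; drop table audit_log");  // now throws SQLException("invalid schema name")
      }
    }
  }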



hive git commit: HIVE-20462: "CREATE VIEW IF NOT EXISTS" fails if view already exists (Daniel Dai, reviewed by Jesus Camacho Rodriguez)

2018-09-15 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master e69129454 -> be1130d56


HIVE-20462: "CREATE VIEW IF NOT EXISTS" fails if view already exists (Daniel 
Dai, reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/be1130d5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/be1130d5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/be1130d5

Branch: refs/heads/master
Commit: be1130d567bd3b075fa3364215bb561e221506ed
Parents: e691294
Author: Daniel Dai 
Authored: Sat Sep 15 15:59:46 2018 -0700
Committer: Daniel Dai 
Committed: Sat Sep 15 15:59:46 2018 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java |  2 +-
 ql/src/test/queries/clientpositive/create_view.q|  2 ++
 ql/src/test/results/clientpositive/create_view.q.out| 10 ++
 3 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/be1130d5/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 79cb54e..8aa971a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -5155,7 +5155,7 @@ public class DDLTask extends Task implements 
Serializable {
 }
   }
 
-  if (!crtView.isReplace()) {
+  if (!crtView.isReplace() && !crtView.getIfNotExists()) {
 // View already exists, thus we should be replacing
 throw new 
HiveException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(crtView.getViewName()));
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/be1130d5/ql/src/test/queries/clientpositive/create_view.q
--
diff --git a/ql/src/test/queries/clientpositive/create_view.q 
b/ql/src/test/queries/clientpositive/create_view.q
index dce2866..3fe14f4 100644
--- a/ql/src/test/queries/clientpositive/create_view.q
+++ b/ql/src/test/queries/clientpositive/create_view.q
@@ -237,6 +237,8 @@ select * from view17;
 create view view18 as select v+1 from (select 1 as v) t;
 select * from view18;
 
+-- create view if not exists
+create view if not exists view18 as select v+1 from (select 1 as v) t;
 
 DROP VIEW view1;
 DROP VIEW view2;

http://git-wip-us.apache.org/repos/asf/hive/blob/be1130d5/ql/src/test/results/clientpositive/create_view.q.out
--
diff --git a/ql/src/test/results/clientpositive/create_view.q.out 
b/ql/src/test/results/clientpositive/create_view.q.out
index d2c516f..99d1543 100644
--- a/ql/src/test/results/clientpositive/create_view.q.out
+++ b/ql/src/test/results/clientpositive/create_view.q.out
@@ -1604,6 +1604,16 @@ POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Input: default@view18
 #### A masked pattern was here ####
 2
+PREHOOK: query: create view if not exists view18 as select v+1 from (select 1 
as v) t
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: database:default
+PREHOOK: Output: default@view18
+POSTHOOK: query: create view if not exists view18 as select v+1 from (select 1 
as v) t
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@view18
 PREHOOK: query: DROP VIEW view1
 PREHOOK: type: DROPVIEW
 PREHOOK: Input: default@view1



hive git commit: HIVE-20494: GenericUDFRestrictInformationSchema is broken after HIVE-19440 (Daniel Dai, reviewed by Vaibhav Gumashta)

2018-09-17 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/master 393b382af -> e4f4283c7


HIVE-20494: GenericUDFRestrictInformationSchema is broken after HIVE-19440 
(Daniel Dai, reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e4f4283c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e4f4283c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e4f4283c

Branch: refs/heads/master
Commit: e4f4283c76dd3a13d0bf3a6dc680bd37e8ae5cd4
Parents: 393b382
Author: Daniel Dai 
Authored: Mon Sep 17 14:39:33 2018 -0700
Committer: Daniel Dai 
Committed: Mon Sep 17 14:39:33 2018 -0700

--
 .../udf/generic/GenericUDFRestrictInformationSchema.java | 11 +--
 1 file changed, 5 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/e4f4283c/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRestrictInformationSchema.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRestrictInformationSchema.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRestrictInformationSchema.java
index 3635a5a..03875ff 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRestrictInformationSchema.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRestrictInformationSchema.java
@@ -97,12 +97,11 @@ public class GenericUDFRestrictInformationSchema extends 
GenericUDF {
 LOG.warn("Error instantiating 
hive.security.metastore.authorization.manager", e);
   }
 }
-
-if (enableHS2PolicyProvider || enableMetastorePolicyProvider) {
-  enabled = new BooleanWritable(true);
-} else {
-  enabled = new BooleanWritable(false);
-}
+  }
+  if (enableHS2PolicyProvider || enableMetastorePolicyProvider) {
+enabled = new BooleanWritable(true);
+  } else {
+enabled = new BooleanWritable(false);
   }
 }
 



hive git commit: HIVE-20494: GenericUDFRestrictInformationSchema is broken after HIVE-19440 (Daniel Dai, reviewed by Vaibhav Gumashta)

2018-09-17 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/branch-3 550cc6199 -> f8f0ca50a


HIVE-20494: GenericUDFRestrictInformationSchema is broken after HIVE-19440 
(Daniel Dai, reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f8f0ca50
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f8f0ca50
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f8f0ca50

Branch: refs/heads/branch-3
Commit: f8f0ca50a35ddd05785cb1581c41af853ef9a5ef
Parents: 550cc61
Author: Daniel Dai 
Authored: Mon Sep 17 14:40:33 2018 -0700
Committer: Daniel Dai 
Committed: Mon Sep 17 14:40:33 2018 -0700

--
 .../udf/generic/GenericUDFRestrictInformationSchema.java | 11 +--
 1 file changed, 5 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f8f0ca50/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRestrictInformationSchema.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRestrictInformationSchema.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRestrictInformationSchema.java
index 3635a5a..03875ff 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRestrictInformationSchema.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRestrictInformationSchema.java
@@ -97,12 +97,11 @@ public class GenericUDFRestrictInformationSchema extends 
GenericUDF {
 LOG.warn("Error instantiating 
hive.security.metastore.authorization.manager", e);
   }
 }
-
-if (enableHS2PolicyProvider || enableMetastorePolicyProvider) {
-  enabled = new BooleanWritable(true);
-} else {
-  enabled = new BooleanWritable(false);
-}
+  }
+  if (enableHS2PolicyProvider || enableMetastorePolicyProvider) {
+enabled = new BooleanWritable(true);
+  } else {
+enabled = new BooleanWritable(false);
   }
 }
 



[1/2] hive git commit: HIVE-20225: SerDe to support Teradata Binary Format (Lu Li, reviewed by Carl Steinbach)

2018-09-11 Thread daijy
Repository: hive
Updated Branches:
  refs/heads/branch-3 b1c8b2ad3 -> eb842b75b


http://git-wip-us.apache.org/repos/asf/hive/blob/eb842b75/serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinarySerde.java
--
diff --git 
a/serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinarySerde.java
 
b/serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinarySerde.java
new file mode 100644
index 000..ccf5f44
--- /dev/null
+++ 
b/serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinarySerde.java
@@ -0,0 +1,597 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.serde2.teradata;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.common.type.Timestamp;
+import org.apache.hadoop.hive.serde2.io.ByteWritable;
+import org.apache.hadoop.hive.serde2.io.DateWritableV2;
+import org.apache.hadoop.hive.serde2.io.DoubleWritable;
+import org.apache.hadoop.hive.serde2.io.HiveCharWritable;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
+import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritableV2;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.AbstractSerDe;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.SerDeSpec;
+import org.apache.hadoop.hive.serde2.SerDeStats;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector;
+import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector;
+import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector;
+import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
+import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveCharObjectInspector;
+import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
+import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveVarcharObjectInspector;
+import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector;
+import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
+import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector;
+import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.hive.common.type.Date;
+
+import javax.annotation.Nullable;
+import java.io.ByteArrayInputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.text.ParseException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static java.lang.String.format;
+
+/**
+ * https://cwiki.apache.org/confluence/display/Hive/TeradataBinarySerde.
+ * TeradataBinarySerde handles the serialization and deserialization of 
Teradata Binary Record
+ * passed from 

[2/2] hive git commit: HIVE-20225: SerDe to support Teradata Binary Format (Lu Li, reviewed by Carl Steinbach)

2018-09-11 Thread daijy
HIVE-20225: SerDe to support Teradata Binary Format (Lu Li, reviewed by Carl 
Steinbach)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/eb842b75
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/eb842b75
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/eb842b75

Branch: refs/heads/branch-3
Commit: eb842b75b4b0235da9eef6951e091f48640202ad
Parents: b1c8b2a
Author: Daniel Dai 
Authored: Tue Sep 11 12:58:05 2018 -0700
Committer: Daniel Dai 
Committed: Tue Sep 11 13:00:05 2018 -0700

--
 .../td_data_with_1mb_rowsize.teradata.gz| Bin 0 -> 616 bytes
 .../teradata_binary_table.deflate   | Bin 0 -> 1329 bytes
 .../ql/io/TeradataBinaryFileInputFormat.java|  66 ++
 .../ql/io/TeradataBinaryFileOutputFormat.java   | 112 
 .../hive/ql/io/TeradataBinaryRecordReader.java  | 280 +
 .../clientpositive/test_teradatabinaryfile.q| 123 
 .../test_teradatabinaryfile.q.out   | 537 +
 .../teradata/TeradataBinaryDataInputStream.java | 199 +++
 .../TeradataBinaryDataOutputStream.java | 270 +
 .../serde2/teradata/TeradataBinarySerde.java| 597 +++
 .../TestTeradataBinarySerdeForDate.java |  76 +++
 .../TestTeradataBinarySerdeForDecimal.java  | 106 
 .../TestTeradataBinarySerdeForTimeStamp.java| 111 
 .../TestTeradataBinarySerdeGeneral.java | 133 +
 14 files changed, 2610 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/eb842b75/data/files/teradata_binary_file/td_data_with_1mb_rowsize.teradata.gz
--
diff --git 
a/data/files/teradata_binary_file/td_data_with_1mb_rowsize.teradata.gz 
b/data/files/teradata_binary_file/td_data_with_1mb_rowsize.teradata.gz
new file mode 100644
index 000..7319e3c
Binary files /dev/null and 
b/data/files/teradata_binary_file/td_data_with_1mb_rowsize.teradata.gz differ

http://git-wip-us.apache.org/repos/asf/hive/blob/eb842b75/data/files/teradata_binary_file/teradata_binary_table.deflate
--
diff --git a/data/files/teradata_binary_file/teradata_binary_table.deflate 
b/data/files/teradata_binary_file/teradata_binary_table.deflate
new file mode 100644
index 000..fd53dde
Binary files /dev/null and 
b/data/files/teradata_binary_file/teradata_binary_table.deflate differ

http://git-wip-us.apache.org/repos/asf/hive/blob/eb842b75/ql/src/java/org/apache/hadoop/hive/ql/io/TeradataBinaryFileInputFormat.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/TeradataBinaryFileInputFormat.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/TeradataBinaryFileInputFormat.java
new file mode 100644
index 000..bed87c5
--- /dev/null
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/io/TeradataBinaryFileInputFormat.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.mapred.FileInputFormat;
+import org.apache.hadoop.mapred.FileSplit;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+
+/**
+ * https://cwiki.apache.org/confluence/display/Hive/TeradataBinarySerde.
+ * FileInputFormat for Teradata binary files.
+ *
+ * In the Teradata Binary File, each record constructs as below:
+ * The first 2 bytes represents the length of the bytes next for this record.
+ * Then the null bitmap whose length is depended on the number of fields is 
followed.
+ * Then each field of the record is serialized into bytes - the serialization 
strategy is decided by the type of field.
+ * At last, there 
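
The record layout described above (a 2-byte length, then a null bitmap sized from the number of fields, then the per-field bytes) can be walked with a few lines of code. The sketch below only illustrates that description; the byte order of the length field and the assumption that the counted bytes include the bitmap are guesses here, not facts taken from the patch:

  import java.io.DataInputStream;
  import java.io.EOFException;
  import java.io.IOException;
  import java.io.InputStream;

  public class TeradataRecordSketch {
    // Returns the record body: the null bitmap followed by the serialized fields.
    static byte[] readRecord(InputStream in, int numFields) throws IOException {
      int b1 = in.read();
      int b2 = in.read();
      if (b1 < 0 || b2 < 0) {
        throw new EOFException("no more records");
      }
      int length = b1 | (b2 << 8);          // assumed little-endian record length
      byte[] body = new byte[length];
      new DataInputStream(in).readFully(body);
      int bitmapLen = (numFields + 7) / 8;  // assumed: one bit per field, padded to whole bytes
      // body[0 .. bitmapLen) is the null bitmap; body[bitmapLen ..) holds the field bytes,
      // each encoded according to its declared type.
      return body;
    }
  }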

[hive] branch master updated: HIVE-21583: KillTriggerActionHandler should use "hive" credential (Daniel Dai, reviewed by Prasanth Jayachandran)

2019-04-05 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new baae1aa  HIVE-21583: KillTriggerActionHandler should use "hive" 
credential (Daniel Dai, reviewed by Prasanth Jayachandran)
baae1aa is described below

commit baae1aa697f2425d6a7a60ff4a0ab0e6c0ea6e88
Author: Daniel Dai 
AuthorDate: Fri Apr 5 15:06:49 2019 -0700

HIVE-21583: KillTriggerActionHandler should use "hive" credential (Daniel 
Dai, reviewed by Prasanth Jayachandran)

Signed-off-by: Prasanth Jayachandran 
---
 .../org/apache/hive/jdbc/TestTriggersWorkloadManager.java |  7 +++
 .../hadoop/hive/ql/exec/tez/KillTriggerActionHandler.java |  7 +--
 .../org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java   | 11 +++
 3 files changed, 19 insertions(+), 6 deletions(-)

diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java
 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java
index 6a59ff1..06dd6d0 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java
@@ -17,6 +17,7 @@
 package org.apache.hive.jdbc;
 
 import java.io.File;
+import java.io.FileWriter;
 import java.net.URL;
 import java.util.HashMap;
 import java.util.List;
@@ -57,6 +58,12 @@ public class TestTriggersWorkloadManager extends 
TestTriggersTezSessionPoolManag
 
 String confDir = "../../data/conf/llap/";
 HiveConf.setHiveSiteLocation(new URL("file://" + new 
File(confDir).toURI().getPath() + "/hive-site.xml"));
+conf = new HiveConf();
+conf.setVar(ConfVars.HIVE_AUTHENTICATOR_MANAGER, 
"org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator");
+java.nio.file.Path confPath = File.createTempFile("hive", "test").toPath();
+conf.writeXml(new FileWriter(confPath.toFile()));
+HiveConf.setHiveSiteLocation(new URL("file://" + confPath.toString()));
+
 System.out.println("Setting hive-site: " + HiveConf.getHiveSiteLocation());
 
 conf = new HiveConf();
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/KillTriggerActionHandler.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/KillTriggerActionHandler.java
index 06e9ff6..cb27998 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/KillTriggerActionHandler.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/KillTriggerActionHandler.java
@@ -16,6 +16,7 @@
 
 package org.apache.hadoop.hive.ql.exec.tez;
 
+import java.io.IOException;
 import java.util.Map;
 
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -24,6 +25,7 @@ import org.apache.hadoop.hive.ql.session.KillQuery;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.wm.Trigger;
 import org.apache.hadoop.hive.ql.wm.TriggerActionHandler;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -41,7 +43,8 @@ public class KillTriggerActionHandler implements 
TriggerActionHandler {
-SessionState ss = new SessionState(new HiveConf());
-ss.setIsHiveServerQuery(true);
-SessionState.start(ss);
 // Note: we get query ID here, rather than in the caller, where it 
would be more correct
 //   because we know which exact query we intend to kill. This is 
valid because we
 //   are not expecting query ID to change - we never reuse the 
session for which a
@@ -441,13 +440,17 @@ public class WorkloadManager extends 
TezSessionPoolSession.AbstractTriggerValida
 WmEvent wmEvent = new WmEvent(WmEvent.EventType.KILL);
 LOG.info("Invoking KillQuery for " + queryId + ": " + reason);
 try {
+  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+  SessionState ss = new SessionState(new HiveConf(), 
ugi.getShortUserName());
+  ss.setIsHiveServerQuery(true);
+  SessionState.start(ss);
   kq.killQuery(queryId, reason, toKill.getConf());
   addKillQueryResult(toKill, true);
   killCtx.killSessionFuture.set(true);
   wmEvent.endEvent(toKill);
   LOG.debug("Killed " + queryId);
   return;
-} catch (HiveException ex) {
+} catch (HiveException|IOException ex) {
   LOG.error("Failed to kill " + queryId + "; will try to restart 
AM instead" , ex);
 }
   } else {



[hive] branch master updated: HIVE-21478: Metastore cache update shall capture exception (Daniel Dai, reviewed by Zoltan Haindrich)

2019-03-26 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 12f8371  HIVE-21478: Metastore cache update shall capture exception 
(Daniel Dai, reviewed by Zoltan Haindrich)
12f8371 is described below

commit 12f83719d940034dc8c6273e2772f6b30d07108e
Author: Daniel Dai 
AuthorDate: Tue Mar 26 14:31:13 2019 -0700

HIVE-21478: Metastore cache update shall capture exception (Daniel Dai, 
reviewed by Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 
---
 .../java/org/apache/hadoop/hive/metastore/cache/CachedStore.java| 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index bded743..41b72d1 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -715,7 +715,11 @@ public class CachedStore implements RawStore, Configurable 
{
   }
 } else {
   // TODO: prewarm and update can probably be merged.
-  update();
+  try {
+update();
+  } catch (Exception e) {
+LOG.error("periodical refresh fail ", e);
+  }
 }
   } else {
 try {



[hive] branch master updated: HIVE-21479: NPE during metastore cache update (Daniel Dai, reviewed by Zoltan Haindrich)

2019-03-26 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 5708a0b  HIVE-21479: NPE during metastore cache update (Daniel Dai, 
reviewed by Zoltan Haindrich)
5708a0b is described below

commit 5708a0b797bf12b4f61afaf0d343ea6bd9b237e2
Author: Daniel Dai 
AuthorDate: Tue Mar 26 14:33:11 2019 -0700

HIVE-21479: NPE during metastore cache update (Daniel Dai, reviewed by 
Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich 
---
 .../hadoop/hive/metastore/cache/CachedStore.java   | 31 +-
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 41b72d1..3564efe 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -812,7 +812,7 @@ public class CachedStore implements RawStore, Configurable {
   rawStore.openTransaction();
   try {
 Table table = rawStore.getTable(catName, dbName, tblName);
-if (!table.isSetPartitionKeys()) {
+if (table != null && !table.isSetPartitionKeys()) {
   List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
   Deadline.startTimer("getTableColumnStatistics");
 
@@ -856,18 +856,20 @@ public class CachedStore implements RawStore, 
Configurable {
   rawStore.openTransaction();
   try {
 Table table = rawStore.getTable(catName, dbName, tblName);
-List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
-List<String> partNames = rawStore.listPartitionNames(catName, dbName, 
tblName, (short) -1);
-// Get partition column stats for this table
-Deadline.startTimer("getPartitionColumnStatistics");
-List<ColumnStatistics> partitionColStats =
-rawStore.getPartitionColumnStatistics(catName, dbName, tblName, 
partNames, colNames);
-Deadline.stopTimer();
-sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, 
partitionColStats);
-List<Partition> parts = rawStore.getPartitionsByNames(catName, dbName, 
tblName, partNames);
-// Also save partitions for consistency as they have the stats state.
-for (Partition part : parts) {
-  sharedCache.alterPartitionInCache(catName, dbName, tblName, 
part.getValues(), part);
+if (table != null) {
+  List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+  List<String> partNames = rawStore.listPartitionNames(catName, 
dbName, tblName, (short) -1);
+  // Get partition column stats for this table
+  Deadline.startTimer("getPartitionColumnStatistics");
+  List<ColumnStatistics> partitionColStats =
+  rawStore.getPartitionColumnStatistics(catName, dbName, 
tblName, partNames, colNames);
+  Deadline.stopTimer();
+  sharedCache.refreshPartitionColStatsInCache(catName, dbName, 
tblName, partitionColStats);
+  List<Partition> parts = rawStore.getPartitionsByNames(catName, 
dbName, tblName, partNames);
+  // Also save partitions for consistency as they have the stats state.
+  for (Partition part : parts) {
+sharedCache.alterPartitionInCache(catName, dbName, tblName, 
part.getValues(), part);
+  }
 }
 committed = rawStore.commitTransaction();
   } catch (MetaException | NoSuchObjectException e) {
@@ -886,6 +888,9 @@ public class CachedStore implements RawStore, Configurable {
String tblName) {
   try {
 Table table = rawStore.getTable(catName, dbName, tblName);
+if (table == null) {
+  return;
+}
 List<String> partNames = rawStore.listPartitionNames(catName, dbName, 
tblName, (short) -1);
 List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
 if ((partNames != null) && (partNames.size() > 0)) {



[hive] branch master updated: HIVE-21507: Hive swallows NPE if no delegation token found (Denes Bodo, reviewed by Zoltan Haindrich, Daniel Dai)

2019-03-26 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new afd7b5b  HIVE-21507: Hive swallows NPE if no delegation token found 
(Denes Bodo, reviewed by Zoltan Haindrich, Daniel Dai)
afd7b5b is described below

commit afd7b5b38556f638782606edbe6850ef70e1c8bb
Author: Denes Bodo 
AuthorDate: Tue Mar 26 14:25:45 2019 -0700

HIVE-21507: Hive swallows NPE if no delegation token found (Denes Bodo, 
reviewed by Zoltan Haindrich, Daniel Dai)

Signed-off-by: Zoltan Haindrich , Daniel Dai 
---
 .../java/org/apache/hive/jdbc/HiveConnection.java  | 62 ++
 .../org/apache/hive/jdbc/TestHiveConnection.java   | 60 +
 2 files changed, 101 insertions(+), 21 deletions(-)

diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java 
b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index 32a4761..4c7119f 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -760,29 +760,21 @@ public class HiveConnection implements java.sql.Connection {
   }
 
   // Lookup the delegation token. First in the connection URL, then Configuration
-  private String getClientDelegationToken(Map<String, String> jdbcConnConf)
-      throws SQLException {
+  private String getClientDelegationToken(Map<String, String> jdbcConnConf) throws SQLException {
     String tokenStr = null;
-    if (JdbcConnectionParams.AUTH_TOKEN.equalsIgnoreCase(jdbcConnConf.get(JdbcConnectionParams.AUTH_TYPE))) {
-      // check delegation token in job conf if any
+    if (!JdbcConnectionParams.AUTH_TOKEN.equalsIgnoreCase(jdbcConnConf.get(JdbcConnectionParams.AUTH_TYPE))) {
+      return null;
+    }
+    DelegationTokenFetcher fetcher = new DelegationTokenFetcher();
+    try {
+      tokenStr = fetcher.getTokenStringFromFile();
+    } catch (IOException e) {
+      LOG.warn("Cannot get token from environment variable $HADOOP_TOKEN_FILE_LOCATION=" +
+          System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION));
+    }
+    if (tokenStr == null) {
       try {
-        if (System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION) != null) {
-          try {
-            Credentials cred = new Credentials();
-            DataInputStream dis = new DataInputStream(new FileInputStream(System.getenv(UserGroupInformation
-                .HADOOP_TOKEN_FILE_LOCATION)));
-            cred.readTokenStorageStream(dis);
-            dis.close();
-            Token<? extends TokenIdentifier> token = cred.getToken(new Text("hive"));
-            tokenStr = token.encodeToUrlString();
-          } catch (IOException e) {
-            LOG.warn("Cannot get token from environment variable $HADOOP_TOKEN_FILE_LOCATION=" +
-                System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION));
-          }
-        }
-        if (tokenStr == null) {
-          tokenStr = SessionUtils.getTokenStrForm(HiveAuthConstants.HS2_CLIENT_TOKEN);
-        }
+        return fetcher.getTokenFromSession();
       } catch (IOException e) {
         throw new SQLException("Error reading token ", e);
       }
@@ -790,6 +782,34 @@ public class HiveConnection implements java.sql.Connection {
     return tokenStr;
   }
 
+  static class DelegationTokenFetcher {
+    String getTokenStringFromFile() throws IOException {
+      if (System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION) == null) {
+        return null;
+      }
+      Credentials cred = new Credentials();
+      try (DataInputStream dis = new DataInputStream(new FileInputStream(System.getenv(UserGroupInformation
+          .HADOOP_TOKEN_FILE_LOCATION)))) {
+        cred.readTokenStorageStream(dis);
+      }
+      return getTokenFromCredential(cred, "hive");
+    }
+
+    String getTokenFromCredential(Credentials cred, String key) throws IOException {
+      Token<? extends TokenIdentifier> token = cred.getToken(new Text(key));
+      if (token == null) {
+        LOG.warn("Delegation token with key: [hive] cannot be found.");
+        return null;
+      }
+      return token.encodeToUrlString();
+    }
+
+    String getTokenFromSession() throws IOException {
+      LOG.debug("Fetching delegation token from session.");
+      return SessionUtils.getTokenStrForm(HiveAuthConstants.HS2_CLIENT_TOKEN);
+    }
+  }
+
   private void openSession() throws SQLException {
     TOpenSessionReq openReq = new TOpenSessionReq();
 
diff --git a/jdbc/src/test/org/apache/hive/jdbc/TestHiveConnection.java 
b/jdbc/src/test/org/apache/hive/jdbc/TestHiveConnection.java
new file mode 100644
index 000..bcd2608
--- /dev/null
+++ b/jdbc/src/test/org/apache/hive/jdbc/TestHiveConnection.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or mor
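(The new TestHiveConnection.java is truncated above. As a rough, hedged sketch of the kind of check the extracted DelegationTokenFetcher makes possible -- assuming a JUnit test in the org.apache.hive.jdbc package so the package-private class is visible; this is not the actual file contents:)

    // Inside a test method declared "throws Exception"; assertEquals/assertNull are JUnit asserts.
    Credentials cred = new Credentials();
    Token<TokenIdentifier> token = new Token<>("id".getBytes(), "pw".getBytes(),
        new Text("HIVE_DELEGATION_TOKEN"), new Text("hs2"));
    cred.addToken(new Text("hive"), token);

    HiveConnection.DelegationTokenFetcher fetcher = new HiveConnection.DelegationTokenFetcher();
    assertEquals(token.encodeToUrlString(), fetcher.getTokenFromCredential(cred, "hive"));
    // Before this patch the missing-token case surfaced as an NPE; it now returns null.
    assertNull(fetcher.getTokenFromCredential(new Credentials(), "hive"));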

[hive] branch master updated: HIVE-21255: Remove QueryConditionBuilder in JdbcStorageHandler (Daniel Dai, reviewed by Jesus Camacho Rodriguez)

2019-03-01 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new f51f108  HIVE-21255: Remove QueryConditionBuilder in 
JdbcStorageHandler (Daniel Dai, reviewed by Jesus Camacho Rodriguez)
f51f108 is described below

commit f51f108b761f0c88647f48f30447dae12b308f31
Author: Daniel Dai 
AuthorDate: Fri Mar 1 11:52:18 2019 -0800

HIVE-21255: Remove QueryConditionBuilder in JdbcStorageHandler (Daniel Dai, 
reviewed by Jesus Camacho Rodriguez)

Signed-off-by: Jesus Camacho Rodriguez 
---
 .../hive/storage/jdbc/QueryConditionBuilder.java   | 186 -
 .../jdbc/conf/JdbcStorageConfigManager.java|  11 --
 .../storage/jdbc/TestQueryConditionBuilder.java| 150 -
 3 files changed, 347 deletions(-)

diff --git 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/QueryConditionBuilder.java
 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/QueryConditionBuilder.java
deleted file mode 100644
index 194fad8..000
--- 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/QueryConditionBuilder.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hive.storage.jdbc;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.TableScanDesc;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hive.storage.jdbc.conf.JdbcStorageConfig;
-
-import java.beans.XMLDecoder;
-import java.io.ByteArrayInputStream;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Translates the hive query condition into a condition that can be run on the underlying database
- */
-public class QueryConditionBuilder {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(QueryConditionBuilder.class);
-  private static final String EMPTY_STRING = "";
-  private static QueryConditionBuilder instance = null;
-
-
-  public static QueryConditionBuilder getInstance() {
-    if (instance == null) {
-      instance = new QueryConditionBuilder();
-    }
-
-    return instance;
-  }
-
-
-  private QueryConditionBuilder() {
-
-  }
-
-
-  public String buildCondition(Configuration conf) {
-    if (conf == null) {
-      return EMPTY_STRING;
-    }
-
-    String filterXml = conf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
-    String hiveColumns = conf.get(serdeConstants.LIST_COLUMNS);
-    String columnMapping = conf.get(JdbcStorageConfig.COLUMN_MAPPING.getPropertyName());
-
-    if ((filterXml == null) || ((columnMapping == null) && (hiveColumns == null))) {
-      return EMPTY_STRING;
-    }
-
-    if (hiveColumns == null) {
-      hiveColumns = "";
-    }
-
-    Map<String, String> columnMap = buildColumnMapping(columnMapping, hiveColumns);
-    String condition = createConditionString(filterXml, columnMap);
-    return condition;
-  }
-
-
-  /*
-   * Build a Hive-to-X column mapping,
-   *
-   */
-  private Map<String, String> buildColumnMapping(String columnMapping, String hiveColumns) {
-    if ((columnMapping == null) || (columnMapping.trim().isEmpty())) {
-      return createIdentityMap(hiveColumns);
-    }
-
-    Map<String, String> columnMap = new HashMap<String, String>();
-    String[] mappingPairs = columnMapping.toLowerCase().split(",");
-    for (String mapPair : mappingPairs) {
-      String[] columns = mapPair.split("=");
-      columnMap.put(columns[0].trim(), columns[1].trim());
-    }
-
-    return columnMap;
-  }
-
-
-  /*
-   * When no mapping is defined, it is assumed that the hive column names are equivalent to the column names in the
-   * underlying table
-   */
-  private Map<String, String> createIdentityMap(String hiveColumns) {
-    Map<String, String> columnMap = new HashMap<String, String>();
-    String[] columns = hiveColumns.toLowerCase().split(",");
-
-    for (String col : columns) {
-      columnMap.put(col.trim(), col.trim());
-    }
-
-    return columnMap;
-  }
-
-
-  /*
-   * Walk to Hive AST and translate the hive column names to their equivalent mappings. This is basically a cheat.
-   *
-   */
-  private String createConditionString(String filterXml, Map<String, String> columnMap) {
-    if ((fi
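(For archive readers: the column-mapping string the removed builder parsed pairs Hive column names with column names in the underlying database. A hedged Java sketch of that parsing, with an illustrative mapping value only:)

    String columnMapping = "visitor_id=vid, sentiment=sentiment";   // illustrative table-property value
    Map<String, String> columnMap = new HashMap<>();
    for (String pair : columnMapping.toLowerCase().split(",")) {
      String[] cols = pair.split("=");
      columnMap.put(cols[0].trim(), cols[1].trim());
    }
    // columnMap -> {visitor_id=vid, sentiment=sentiment}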

[hive] branch master updated: HIVE-21247: Webhcat beeline in secure mode (Daniel Dai, reviewed by Thejas Nair)

2019-03-01 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 8b2c6f0  HIVE-21247: Webhcat beeline in secure mode (Daniel Dai, 
reviewed by Thejas Nair)
8b2c6f0 is described below

commit 8b2c6f0cf28bdd7ce3fea5c4b191910064cae1c3
Author: Daniel Dai 
AuthorDate: Fri Mar 1 09:57:55 2019 -0800

HIVE-21247: Webhcat beeline in secure mode (Daniel Dai, reviewed by Thejas 
Nair)

Signed-off-by: Thejas M Nair 
---
 .../src/java/org/apache/hive/beeline/BeeLine.java  |  4 ++
 hcatalog/webhcat/svr/pom.xml   | 34 +++
 .../apache/hive/hcatalog/templeton/AppConfig.java  |  1 +
 .../hive/hcatalog/templeton/HiveDelegator.java |  6 ++
 .../hcatalog/templeton/SecureProxySupport.java |  1 +
 .../hive/hcatalog/templeton/tool/LaunchMapper.java | 20 ++-
 .../templeton/tool/TempletonControllerJob.java | 68 ++
 .../java/org/apache/hive/jdbc/HiveConnection.java  | 26 -
 packaging/src/main/assembly/bin.xml|  3 +
 pom.xml|  1 +
 10 files changed, 150 insertions(+), 14 deletions(-)

diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java 
b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 54edfeb..82077cc 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -1203,6 +1203,10 @@ public class BeeLine implements Closeable {
   if (password != null) {
 
jdbcConnectionParams.getSessionVars().put(JdbcConnectionParams.AUTH_PASSWD, 
password);
   }
+  String auth = cl.getOptionValue("a");
+  if (auth != null) {
+
jdbcConnectionParams.getSessionVars().put(JdbcConnectionParams.AUTH_TYPE, auth);
+  }
   mergedConnectionProperties =
   HS2ConnectionFileUtils.mergeUserConnectionPropertiesAndBeelineSite(
   userConnectionProperties, jdbcConnectionParams);
diff --git a/hcatalog/webhcat/svr/pom.xml b/hcatalog/webhcat/svr/pom.xml
index 4dfade5..75f1c70 100644
--- a/hcatalog/webhcat/svr/pom.xml
+++ b/hcatalog/webhcat/svr/pom.xml
@@ -92,21 +92,45 @@
   com.sun.jersey
   jersey-core
   ${jersey.version}
+  
+
+  javax.ws.rs
+  jsr311-api
+
+  
 
 
   com.sun.jersey
   jersey-json
   ${jersey.version}
+  
+
+  com.sun.jersey
+  jersey-server
+
+  
 
 
   com.sun.jersey
   jersey-servlet
   ${jersey.version}
+  
+
+  com.sun.jersey
+  jersey-server
+
+  
 
 
   com.sun.jersey.contribs
   wadl-resourcedoc-doclet
   ${wadl-resourcedoc-doclet.version}
+  
+
+  com.sun.jersey
+  jersey-server
+
+  
 
 
   org.apache.commons
@@ -144,6 +168,11 @@
   ${slf4j.version}
 
 
+  org.apache.hive
+  hive-jdbc
+  ${project.version}
+
+
   org.apache.hadoop
   hadoop-auth
   ${hadoop.version}
@@ -199,6 +228,11 @@
 
   
 
+
+  javax.ws.rs
+  javax.ws.rs-api
+  ${rs-api.version}
+
 
 
   org.apache.hive
diff --git 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
index 1fd9e47..b566cf8 100644
--- 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
+++ 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
@@ -163,6 +163,7 @@ public class AppConfig extends Configuration {
* of escape/unescape methods in {@link org.apache.hadoop.util.StringUtils} 
in webhcat.
*/
   public static final String HIVE_PROPS_NAME = "templeton.hive.properties";
+  public static final String HIVE_SERVER2_URL= "templeton.hive.hs2.url";
   public static final String SQOOP_ARCHIVE_NAME  = "templeton.sqoop.archive";
   public static final String SQOOP_PATH_NAME = "templeton.sqoop.path";
   public static final String SQOOP_HOME_PATH = "templeton.sqoop.home";
diff --git 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
index 3f1968d..3f679ac 100644
--- 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
+++ 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
@@ -28,6 +28,7 @@ import java.util.Map;
 
 import org.apache.commons.exec.ExecuteException;
 import org.apache.hadoop.fs.Path;
+imp

[hive] branch master updated: HIVE-21296: Dropping varchar partition throw exception (Daniel Dai, reviewed by Anishek Agarwal)

2019-02-21 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 5f30498  HIVE-21296: Dropping varchar partition throw exception 
(Daniel Dai, reviewed by Anishek Agarwal)
5f30498 is described below

commit 5f3049828ca872d88c80602e2e7d46d9be6255f2
Author: Daniel Dai 
AuthorDate: Thu Feb 21 10:16:04 2019 -0800

HIVE-21296: Dropping varchar partition throw exception (Daniel Dai, 
reviewed by Anishek Agarwal)

Signed-off-by: Anishek Agarwal 
---
 .../java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java |  3 ++-
 ql/src/test/queries/clientpositive/partition_varchar1.q|  2 ++
 ql/src/test/results/clientpositive/partition_varchar1.q.out| 10 ++
 3 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
index eb5b111..9febee4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
@@ -102,7 +102,8 @@ public class ExprNodeDescUtils {
 
   private static boolean isDefaultPartition(ExprNodeDesc origin, String defaultPartitionName) {
     if (origin instanceof ExprNodeConstantDesc && ((ExprNodeConstantDesc)origin).getValue() != null &&
-        ((ExprNodeConstantDesc)origin).getValue().equals(defaultPartitionName)) {
+        ((ExprNodeConstantDesc)origin).getValue() instanceof String && ((ExprNodeConstantDesc)origin).getValue()
+        .equals(defaultPartitionName)) {
       return true;
     } else {
       return false;
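(Context for the guard above, as a hedged sketch with assumed local names: for a varchar partition column the constant's value object need not be a java.lang.String, so the extra instanceof check keeps the default-partition comparison from misfiring or throwing.)

    Object value = ((ExprNodeConstantDesc) origin).getValue();
    boolean isDefault = value != null
        && value instanceof String              // new guard: non-String constants (e.g. varchar-typed) are skipped
        && value.equals(defaultPartitionName);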
diff --git a/ql/src/test/queries/clientpositive/partition_varchar1.q 
b/ql/src/test/queries/clientpositive/partition_varchar1.q
index 216bcf5..ce9ee76 100644
--- a/ql/src/test/queries/clientpositive/partition_varchar1.q
+++ b/ql/src/test/queries/clientpositive/partition_varchar1.q
@@ -42,4 +42,6 @@ select count(*) from partition_varchar_1 where dt <= 
'2000-01-01' and region = 1
 -- 20
 select count(*) from partition_varchar_1 where dt <> '2000-01-01' and region = 
1;
 
+alter table partition_varchar_1 drop partition (dt = '2000-01-01');
+
 drop table partition_varchar_1;
diff --git a/ql/src/test/results/clientpositive/partition_varchar1.q.out 
b/ql/src/test/results/clientpositive/partition_varchar1.q.out
index 93c9adf..b5d1890 100644
--- a/ql/src/test/results/clientpositive/partition_varchar1.q.out
+++ b/ql/src/test/results/clientpositive/partition_varchar1.q.out
@@ -190,6 +190,16 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partition_varchar_1
  A masked pattern was here 
 20
+PREHOOK: query: alter table partition_varchar_1 drop partition (dt = 
'2000-01-01')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@partition_varchar_1
+PREHOOK: Output: default@partition_varchar_1@dt=2000-01-01/region=1
+PREHOOK: Output: default@partition_varchar_1@dt=2000-01-01/region=2
+POSTHOOK: query: alter table partition_varchar_1 drop partition (dt = 
'2000-01-01')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@partition_varchar_1
+POSTHOOK: Output: default@partition_varchar_1@dt=2000-01-01/region=1
+POSTHOOK: Output: default@partition_varchar_1@dt=2000-01-01/region=2
 PREHOOK: query: drop table partition_varchar_1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@partition_varchar_1



[hive] branch master updated: HIVE-21295: StorageHandler shall convert date to string using Hive convention (Daniel Dai, reviewed by Jesus Camacho Rodriguez)

2019-02-21 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new e71b096  HIVE-21295: StorageHandler shall convert date to string using 
Hive convention (Daniel Dai, reviewed by Jesus Camacho Rodriguez)
e71b096 is described below

commit e71b09677b610c9b77c924105cc624b46e90695c
Author: Daniel Dai 
AuthorDate: Thu Feb 21 10:23:49 2019 -0800

HIVE-21295: StorageHandler shall convert date to string using Hive 
convention (Daniel Dai, reviewed by Jesus Camacho Rodriguez)

Signed-off-by: Jesus Camacho Rodriguez 
---
 .../org/apache/hive/storage/jdbc/JdbcSerDe.java|  7 +++-
 .../queries/clientpositive/external_jdbc_table.q   | 17 +-
 .../clientpositive/llap/external_jdbc_table.q.out  | 38 +++---
 3 files changed, 41 insertions(+), 21 deletions(-)

diff --git 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcSerDe.java 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcSerDe.java
index 87ba682..aabfd7c 100644
--- a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcSerDe.java
+++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcSerDe.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.io.MapWritable;
 import org.apache.hadoop.io.ObjectWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
+import org.apache.hive.common.util.DateUtils;
 import org.apache.hive.storage.jdbc.conf.JdbcStorageConfigManager;
 import org.apache.hive.storage.jdbc.dao.DatabaseAccessor;
 import org.apache.hive.storage.jdbc.dao.DatabaseAccessorFactory;
@@ -197,7 +198,11 @@ public class JdbcSerDe extends AbstractSerDe {
 case CHAR:
 case VARCHAR:
 case STRING:
-  rowVal = rowVal.toString();
+  if (rowVal instanceof java.sql.Date) {
+rowVal = DateUtils.getDateFormat().format((java.sql.Date)rowVal);
+  } else {
+rowVal = rowVal.toString();
+  }
   break;
 case DATE:
   if (rowVal instanceof java.sql.Date) {
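(A hedged illustration of what the hunk above changes, using a made-up value; DateUtils.getDateFormat() is Hive's date formatter, which by convention produces yyyy-MM-dd strings:)

    Object rowVal = java.sql.Date.valueOf("1999-02-22");   // what a JDBC driver may hand back for a STRING column
    if (rowVal instanceof java.sql.Date) {
      // Format using Hive's convention instead of the driver's toString().
      rowVal = DateUtils.getDateFormat().format((java.sql.Date) rowVal);
    } else {
      rowVal = rowVal.toString();
    }
    // rowVal is now the String "1999-02-22"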
diff --git a/ql/src/test/queries/clientpositive/external_jdbc_table.q 
b/ql/src/test/queries/clientpositive/external_jdbc_table.q
index 3e629d2..36ed93a 100644
--- a/ql/src/test/queries/clientpositive/external_jdbc_table.q
+++ b/ql/src/test/queries/clientpositive/external_jdbc_table.q
@@ -34,19 +34,19 @@ FROM src
 SELECT
 
 dboutput ( 
'jdbc:derby:;databaseName=${system:test.tmp.dir}/test_derby_as_external_table_db;create=true','','',
-'CREATE TABLE SIMPLE_DERBY_TABLE2 ("ikey" INTEGER, "bkey" BIGINT, "fkey" REAL, 
"dkey" DOUBLE )' ),
+'CREATE TABLE SIMPLE_DERBY_TABLE2 ("ikey" INTEGER, "bkey" BIGINT, "fkey" REAL, 
"dkey" DOUBLE, "datekey" DATE)' ),
 
 
dboutput('jdbc:derby:;databaseName=${system:test.tmp.dir}/test_derby_as_external_table_db;create=true','','',
-'INSERT INTO SIMPLE_DERBY_TABLE2 ("ikey","bkey","fkey","dkey") VALUES 
(?,?,?,?)','20','20','20.0','20.0'),
+'INSERT INTO SIMPLE_DERBY_TABLE2 ("ikey","bkey","fkey","dkey","datekey") 
VALUES (?,?,?,?,?)','20','20','20.0','20.0','1999-02-22'),
 
 
dboutput('jdbc:derby:;databaseName=${system:test.tmp.dir}/test_derby_as_external_table_db;create=true','','',
-'INSERT INTO SIMPLE_DERBY_TABLE2 ("ikey","bkey","fkey","dkey") VALUES 
(?,?,?,?)','-20','8','9.0','11.0'),
+'INSERT INTO SIMPLE_DERBY_TABLE2 ("ikey","bkey","fkey","dkey","datekey") 
VALUES (?,?,?,?,?)','-20','8','9.0','11.0','2000-03-15'),
 
 
dboutput('jdbc:derby:;databaseName=${system:test.tmp.dir}/test_derby_as_external_table_db;create=true','','',
-'INSERT INTO SIMPLE_DERBY_TABLE2 ("ikey","bkey","fkey","dkey") VALUES 
(?,?,?,?)','101','-16','66.0','-75.0'),
+'INSERT INTO SIMPLE_DERBY_TABLE2 ("ikey","bkey","fkey","dkey","datekey") 
VALUES (?,?,?,?,?)','101','-16','66.0','-75.0','2010-04-01'),
 
 
dboutput('jdbc:derby:;databaseName=${system:test.tmp.dir}/test_derby_as_external_table_db;create=true','','',
-'INSERT INTO SIMPLE_DERBY_TABLE2 ("ikey","bkey","fkey","dkey") VALUES 
(?,?,?,?)','40','50','-455.4543','330.767')
+'INSERT INTO SIMPLE_DERBY_TABLE2 ("ikey","bkey","fkey","dkey","datekey") 
VALUES (?,?,?,?,?)','40','50','-455.4543','330.767','2010-04-02')
 
 limit 1;
 
@@ -75,7 +75,8 @@ CREATE EXTERNAL TABLE ext_simple_derby_table2
  ikey int,
  bkey bigint,
  fkey float,
- dkey double
+ dkey double,
+ datekey string
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES

[hive] branch master updated: Revert "HIVE-21247: Webhcat beeline in secure mode (Daniel Dai, reviewed by Thejas Nair)"

2019-02-26 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 77b9c3c  Revert "HIVE-21247: Webhcat beeline in secure mode (Daniel 
Dai, reviewed by Thejas Nair)"
77b9c3c is described below

commit 77b9c3c79953121c19554a7de6953a6e21de9db6
Author: Daniel Dai 
AuthorDate: Tue Feb 26 19:16:07 2019 -0800

Revert "HIVE-21247: Webhcat beeline in secure mode (Daniel Dai, reviewed by 
Thejas Nair)"

This reverts commit 3e9614eebd19f9843b28d97aaee1c3dfb815fb3d.
---
 .../src/java/org/apache/hive/beeline/BeeLine.java  |  4 --
 hcatalog/webhcat/svr/pom.xml   | 16 -
 .../apache/hive/hcatalog/templeton/AppConfig.java  |  1 -
 .../hive/hcatalog/templeton/HiveDelegator.java |  6 --
 .../hcatalog/templeton/SecureProxySupport.java |  1 -
 .../hive/hcatalog/templeton/tool/LaunchMapper.java | 20 +--
 .../templeton/tool/TempletonControllerJob.java | 68 --
 .../java/org/apache/hive/jdbc/HiveConnection.java  | 26 +
 packaging/src/main/assembly/bin.xml|  3 -
 pom.xml|  1 -
 10 files changed, 14 insertions(+), 132 deletions(-)

diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java 
b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index cded55f..65eee2c 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -1203,10 +1203,6 @@ public class BeeLine implements Closeable {
   if (password != null) {
 
jdbcConnectionParams.getSessionVars().put(JdbcConnectionParams.AUTH_PASSWD, 
password);
   }
-  String auth = cl.getOptionValue("a");
-  if (auth != null) {
-
jdbcConnectionParams.getSessionVars().put(JdbcConnectionParams.AUTH_TYPE, auth);
-  }
   mergedConnectionProperties =
   HS2ConnectionFileUtils.mergeUserConnectionPropertiesAndBeelineSite(
   userConnectionProperties, jdbcConnectionParams);
diff --git a/hcatalog/webhcat/svr/pom.xml b/hcatalog/webhcat/svr/pom.xml
index 36d15cd..4dfade5 100644
--- a/hcatalog/webhcat/svr/pom.xml
+++ b/hcatalog/webhcat/svr/pom.xml
@@ -92,12 +92,6 @@
   com.sun.jersey
   jersey-core
   ${jersey.version}
-  
-
-  javax.ws.rs
-  jsr311-api
-
-  
 
 
   com.sun.jersey
@@ -150,11 +144,6 @@
   ${slf4j.version}
 
 
-  org.apache.hive
-  hive-jdbc
-  ${project.version}
-
-
   org.apache.hadoop
   hadoop-auth
   ${hadoop.version}
@@ -210,11 +199,6 @@
 
   
 
-
-  javax.ws.rs
-  javax.ws.rs-api
-  ${rs-api.version}
-
 
 
   org.apache.hive
diff --git 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
index b566cf8..1fd9e47 100644
--- 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
+++ 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
@@ -163,7 +163,6 @@ public class AppConfig extends Configuration {
* of escape/unescape methods in {@link org.apache.hadoop.util.StringUtils} 
in webhcat.
*/
   public static final String HIVE_PROPS_NAME = "templeton.hive.properties";
-  public static final String HIVE_SERVER2_URL= "templeton.hive.hs2.url";
   public static final String SQOOP_ARCHIVE_NAME  = "templeton.sqoop.archive";
   public static final String SQOOP_PATH_NAME = "templeton.sqoop.path";
   public static final String SQOOP_HOME_PATH = "templeton.sqoop.home";
diff --git 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
index 3f679ac..3f1968d 100644
--- 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
+++ 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
@@ -28,7 +28,6 @@ import java.util.Map;
 
 import org.apache.commons.exec.ExecuteException;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hive.hcatalog.templeton.tool.JobSubmissionConstants;
 import org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob;
 import org.apache.hive.hcatalog.templeton.tool.TempletonUtils;
@@ -79,11 +78,6 @@ public class HiveDelegator extends LauncherDelegator {
   args.add("-p");
   args.add("default");
 
-  if (UserGroupInformation.isSecurityEnabled()) {
-args.add("-a"

[hive] 02/03: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/hive

2019-03-06 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git

commit 75edc069c4579ff34c8e174c0d10f02decae5dea
Merge: 1252886 0413fec
Author: Daniel Dai 
AuthorDate: Wed Mar 6 11:04:03 2019 -0800

Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/hive

 .../java/org/apache/hadoop/hive/cli/CliDriver.java |   3 +
 .../org/apache/hadoop/hive/conf/Constants.java |   2 +
 .../hive/hcatalog/streaming/TestStreaming.java |   2 +
 .../parse/TestReplicationWithTableMigrationEx.java |  67 
 jdbc-handler/pom.xml   |   6 +-
 .../hive/storage/jdbc/JdbcStorageHandler.java  |   4 +-
 .../jdbc/dao/GenericJdbcDatabaseAccessor.java  |   8 +-
 pom.xml|   2 +
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java|   3 +
 .../apache/hadoop/hive/ql/exec/FetchOperator.java  |  37 -
 .../org/apache/hadoop/hive/ql/exec/Utilities.java  |  97 +++-
 .../bootstrap/events/filesystem/FSTableEvent.java  |   3 +
 .../hive/ql/io/HiveSequenceFileInputFormat.java|  63 
 .../calcite/HiveDefaultRelMetadataProvider.java| 171 +
 .../optimizer/calcite/rules/HiveSemiJoinRule.java  |  48 +++---
 .../hadoop/hive/ql/parse/CalcitePlanner.java   |  33 ++--
 .../apache/hadoop/hive/ql/parse/GenTezUtils.java   |   9 ++
 .../hive/ql/parse/ImportSemanticAnalyzer.java  |   3 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java |   2 +-
 .../apache/hadoop/hive/ql/parse/TaskCompiler.java  |  37 -
 .../hadoop/hive/ql/plan/CreateTableDesc.java   |  13 ++
 .../apache/hadoop/hive/ql/plan/CreateViewDesc.java |  13 ++
 .../org/apache/hadoop/hive/ql/plan/FetchWork.java  |  22 ++-
 .../apache/hadoop/hive/ql/plan/FileSinkDesc.java   |  38 -
 .../hadoop/hive/ql/plan/ImportTableDesc.java   |  13 ++
 .../org/apache/hadoop/hive/ql/plan/PlanUtils.java  |   6 +-
 .../hadoop/hive/ql/stats/fs/FSStatsAggregator.java |  65 ++--
 .../hadoop/hive/ql/exec/TestFileSinkOperator.java  |   2 +-
 ql/src/test/queries/clientpositive/semijoin.q  |  13 ++
 .../results/clientpositive/llap/semijoin.q.out |  51 +-
 .../clientpositive/perf/tez/cbo_query14.q.out  |   6 +-
 .../clientpositive/perf/tez/cbo_query83.q.out  |  12 +-
 .../perf/tez/constraints/cbo_query23.q.out |   8 +-
 .../perf/tez/constraints/cbo_query83.q.out |  12 +-
 .../results/clientpositive/spark/semijoin.q.out|  51 +-
 .../apache/hive/service/server/HiveServer2.java|   4 +
 36 files changed, 750 insertions(+), 179 deletions(-)




[hive] branch master updated (0413fec -> 881080f)

2019-03-06 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git.


from 0413fec  HIVE-21314 : Hive Replication not retaining the owner in the 
replicated table.  (Mahesh Kumar Behera, reviewed by Sankar Hariappan)
 new 1252886  HIVE-21253: Support DB2 in JDBC StorageHandler (Daniel Dai, 
reviewed by Thejas Nair)
 new 75edc06  Merge branch 'master' of 
https://git-wip-us.apache.org/repos/asf/hive
 new 881080f  HIVE-21389: Hive distribution miss javax.ws.rs-api.jar after 
HIVE-21247 (Daniel Dai, reviewed by Thejas Nair)

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../main/java/org/apache/hive/storage/jdbc/JdbcStorageHandler.java | 4 
 .../main/java/org/apache/hive/storage/jdbc/conf/DatabaseType.java  | 1 +
 .../{PostgresDatabaseAccessor.java => DB2DatabaseAccessor.java}| 7 +++
 .../org/apache/hive/storage/jdbc/dao/DatabaseAccessorFactory.java  | 4 
 packaging/src/main/assembly/bin.xml| 1 +
 5 files changed, 13 insertions(+), 4 deletions(-)
 copy 
jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/{PostgresDatabaseAccessor.java
 => DB2DatabaseAccessor.java} (84%)



[hive] 03/03: HIVE-21389: Hive distribution miss javax.ws.rs-api.jar after HIVE-21247 (Daniel Dai, reviewed by Thejas Nair)

2019-03-06 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git

commit 881080f1fb459a6061f08701ade8a79798b9b41b
Author: Daniel Dai 
AuthorDate: Wed Mar 6 11:07:42 2019 -0800

HIVE-21389: Hive distribution miss javax.ws.rs-api.jar after HIVE-21247 
(Daniel Dai, reviewed by Thejas Nair)

Signed-off-by: Thejas M Nair 
---
 packaging/src/main/assembly/bin.xml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/packaging/src/main/assembly/bin.xml 
b/packaging/src/main/assembly/bin.xml
index 766161d..24f9a45 100644
--- a/packaging/src/main/assembly/bin.xml
+++ b/packaging/src/main/assembly/bin.xml
@@ -76,6 +76,7 @@
   
 org.apache.hive.hcatalog:hive-hcatalog-core
 
org.apache.hive.hcatalog:hive-hcatalog-server-extensions
+javax.ws.rs:javax.ws.rs-api
   
 
 



[hive] 01/03: HIVE-21253: Support DB2 in JDBC StorageHandler (Daniel Dai, reviewed by Thejas Nair)

2019-03-06 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git

commit 125288658938dcfa1fe8c144dff8b1f800b1bcc4
Author: Daniel Dai 
AuthorDate: Fri Mar 1 21:23:02 2019 -0800

HIVE-21253: Support DB2 in JDBC StorageHandler (Daniel Dai, reviewed by 
Thejas Nair)

Signed-off-by: Thejas M Nair 
---
 .../hive/storage/jdbc/JdbcStorageHandler.java  |  4 ++
 .../hive/storage/jdbc/conf/DatabaseType.java   |  1 +
 .../hive/storage/jdbc/dao/DB2DatabaseAccessor.java | 44 ++
 .../storage/jdbc/dao/DatabaseAccessorFactory.java  |  4 ++
 4 files changed, 53 insertions(+)

diff --git 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcStorageHandler.java
 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcStorageHandler.java
index d8c5433..0479eec 100644
--- 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcStorageHandler.java
+++ 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcStorageHandler.java
@@ -150,6 +150,10 @@ public class JdbcStorageHandler implements 
HiveStorageHandler {
 } catch (Exception e) {
 }
 try {
+  classesToLoad.add(Class.forName("com.ibm.db2.jcc.DB2Driver"));
+} catch (Exception e) {
+} // Adding db2 jdbc driver if exists
+try {
   JarUtils.addDependencyJars(conf, classesToLoad);
 } catch (IOException e) {
   LOGGER.error("Could not add necessary JDBC storage handler dependencies 
to classpath", e);
diff --git 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/DatabaseType.java
 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/DatabaseType.java
index b8b770f..bdcc3f3 100644
--- 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/DatabaseType.java
+++ 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/DatabaseType.java
@@ -17,6 +17,7 @@ package org.apache.hive.storage.jdbc.conf;
 public enum DatabaseType {
   MYSQL,
   H2,
+  DB2,
   DERBY,
   ORACLE,
   POSTGRES,
diff --git 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DB2DatabaseAccessor.java
 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DB2DatabaseAccessor.java
new file mode 100644
index 000..fab9829
--- /dev/null
+++ 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DB2DatabaseAccessor.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.storage.jdbc.dao;
+
+/**
+ * DB2 specific data accessor. DB2 JDBC drivers work similarly to Postgres, so the current
+ * implementation of DB2DatabaseAccessor is the same as PostgresDatabaseAccessor.
+ */
+public class DB2DatabaseAccessor extends GenericJdbcDatabaseAccessor {
+  @Override
+  protected String addLimitAndOffsetToQuery(String sql, int limit, int offset) {
+    if (offset == 0) {
+      return addLimitToQuery(sql, limit);
+    } else {
+      if (limit == -1) {
+        return sql;
+      }
+      return sql + " LIMIT " + limit + " OFFSET " + offset;
+    }
+  }
+
+  @Override
+  protected String addLimitToQuery(String sql, int limit) {
+    if (limit == -1) {
+      return sql;
+    }
+    return sql + " LIMIT " + limit;
+  }
+}
diff --git 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessorFactory.java
 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessorFactory.java
index 692cb23..e531ecc 100644
--- 
a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessorFactory.java
+++ 
b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessorFactory.java
@@ -51,6 +51,10 @@ public class DatabaseAccessorFactory {
   accessor = new MsSqlDatabaseAccessor();
   break;
 
+case DB2:
+  accessor = new DB2DatabaseAccessor();
+  break;
+
 default:
   accessor = new GenericJdbcDatabaseAccessor();
   break;
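(A small, hedged usage sketch of the new accessor; the query text is illustrative, and the throwaway subclass exists only because the methods are protected. DatabaseAccessorFactory, in the previous hunk, returns this accessor whenever the configured database type resolves to DB2.)

    import org.apache.hive.storage.jdbc.dao.DB2DatabaseAccessor;

    public class DB2AccessorSketch extends DB2DatabaseAccessor {
      public static void main(String[] args) {
        DB2AccessorSketch a = new DB2AccessorSketch();
        System.out.println(a.addLimitAndOffsetToQuery("SELECT * FROM EMPLOYEES", 10, 20));
        // SELECT * FROM EMPLOYEES LIMIT 10 OFFSET 20
        System.out.println(a.addLimitToQuery("SELECT * FROM EMPLOYEES", 10));
        // SELECT * FROM EMPLOYEES LIMIT 10
        System.out.println(a.addLimitToQuery("SELECT * FROM EMPLOYEES", -1));
        // SELECT * FROM EMPLOYEES   (limit of -1 means "no paging")
      }
    }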



[hive] branch master updated: HIVE-21308: Negative forms of variables are not supported in HPL/SQL (Baoning He, reviewed by Daniel Dai)

2019-02-22 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 49fe5fc  HIVE-21308: Negative forms of variables are not supported in 
HPL/SQL (Baoning He, reviewed by Daniel Dai)
49fe5fc is described below

commit 49fe5fc50a97c9fbd08c74aa4ee6e7e30fe22ec8
Author: Daniel Dai 
AuthorDate: Fri Feb 22 09:41:43 2019 -0800

HIVE-21308: Negative forms of variables are not supported in HPL/SQL 
(Baoning He, reviewed by Daniel Dai)
---
 .../main/antlr4/org/apache/hive/hplsql/Hplsql.g4   |  2 +-
 .../src/main/java/org/apache/hive/hplsql/Exec.java | 20 +---
 .../src/main/java/org/apache/hive/hplsql/Var.java  | 22 --
 hplsql/src/test/queries/local/declare2.sql |  9 +
 hplsql/src/test/results/local/declare2.out.txt | 10 +-
 5 files changed, 56 insertions(+), 7 deletions(-)

diff --git a/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4 
b/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
index 77c2e2c..d25e8c5 100644
--- a/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
+++ b/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
@@ -1183,7 +1183,7 @@ timestamp_literal :   // TIMESTAMP 
'-MM-DD HH:MI:SS.FFF'
  ;
  
 ident :
-   (L_ID | non_reserved_words) ('.' (L_ID | non_reserved_words))* 
+   '-'? (L_ID | non_reserved_words) ('.' (L_ID | non_reserved_words))*
  ;
  
 string :   // String literal (single or double 
quoted)
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java 
b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
index 9e27ba1..47f5cef 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
@@ -2129,12 +2129,26 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
* Identifier
*/
   @Override 
-  public Integer visitIdent(HplsqlParser.IdentContext ctx) { 
+  public Integer visitIdent(HplsqlParser.IdentContext ctx) {
+boolean hasSub = false;
 String ident = ctx.getText();
-Var var = findVariable(ident);
+String actualIdent = ident;
+if (ident.startsWith("-")) {
+  hasSub = true;
+  actualIdent = ident.substring(1, ident.length());
+}
+
+Var var = findVariable(actualIdent);
 if (var != null) {
   if (!exec.buildSql) {
-exec.stackPush(var);
+if (hasSub) {
+  Var var1 = new Var(var);
+  var1.negate();
+  exec.stackPush(var1);
+}
+else {
+  exec.stackPush(var);
+}
   }
   else {
 exec.stackPush(new Var(ident, Var.Type.STRING, var.toSqlString()));
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Var.java 
b/hplsql/src/main/java/org/apache/hive/hplsql/Var.java
index a117cb6..06b0e9b 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Var.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Var.java
@@ -552,13 +552,31 @@ public class Var {
}

/**
-* Negate the boolean value
+* Negate the value
 */
public void negate() {
-if(type == Type.BOOL && value != null) {
+if (value == null){
+  return;
+}
+if (type == Type.BOOL) {
   boolean v = ((Boolean)value).booleanValue();
   value = Boolean.valueOf(!v);
 }
+else if (type == Type.DECIMAL) {
+  BigDecimal v = (BigDecimal)value;
+  value = v.negate();
+}
+else if (type == Type.DOUBLE) {
+  Double v = (Double)value;
+  value = -v;
+}
+else if (type == Type.BIGINT) {
+  Long v = (Long)value;
+  value = -v;
+}
+else {
+  throw new NumberFormatException("invalid type " + type);
+}
   }

/**
diff --git a/hplsql/src/test/queries/local/declare2.sql 
b/hplsql/src/test/queries/local/declare2.sql
index 992d09e..9622033 100644
--- a/hplsql/src/test/queries/local/declare2.sql
+++ b/hplsql/src/test/queries/local/declare2.sql
@@ -11,3 +11,12 @@ declare
 begin
   null;
 end;
+
+declare
+  num1 int := 1;
+  num2 int := 2;
+begin
+  print num1;
+  print -num1;
+  print -num1*2+num2;
+end;
\ No newline at end of file
diff --git a/hplsql/src/test/results/local/declare2.out.txt 
b/hplsql/src/test/results/local/declare2.out.txt
index e22ca78..6a0ac18 100644
--- a/hplsql/src/test/results/local/declare2.out.txt
+++ b/hplsql/src/test/results/local/declare2.out.txt
@@ -4,4 +4,12 @@ Ln:5 PRINT
 a
 Ln:6 PRINT
 1
-Ln:10 DECLARE code char = 'a'
\ No newline at end of file
+Ln:10 DECLARE code char = 'a'
+Ln:16 DECLARE num1 int = 1
+Ln:17 DECLARE num2 int = 2
+Ln:19 PRINT
+1
+Ln:20 PRINT
+-1
+Ln:21 PRINT
+0
\ No newline at end of file
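(For anyone cross-checking the new expected output above: -num1 is the negated identifier, and -num1*2+num2 evaluates as (-1) * 2 + 2 = 0. The same arithmetic in a throwaway Java snippet, purely for illustration:)

    long num1 = 1, num2 = 2;
    System.out.println(num1);              // 1
    System.out.println(-num1);             // -1
    System.out.println(-num1 * 2 + num2);  // 0, i.e. (-1) * 2 + 2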



[hive] branch master updated: HIVE-20550: Switch WebHCat to use beeline to submit Hive queries (Daniel Dai, reviewed by Thejas Nair)

2019-02-26 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 7b0b9ae  HIVE-20550: Switch WebHCat to use beeline to submit Hive 
queries (Daniel Dai, reviewed by Thejas Nair)
7b0b9ae is described below

commit 7b0b9ae328757a57d1d88377ea84091487a25f8f
Author: Daniel Dai 
AuthorDate: Tue Feb 26 16:35:30 2019 -0800

HIVE-20550: Switch WebHCat to use beeline to submit Hive queries (Daniel 
Dai, reviewed by Thejas Nair)

Signed-off-by: Thejas M Nair 
---
 .../test/e2e/templeton/drivers/TestDriverCurl.pm   |  6 +--
 .../test/e2e/templeton/tests/jobsubmission.conf|  6 +--
 .../hive/hcatalog/templeton/DeleteDelegator.java   | 59 +++---
 .../hive/hcatalog/templeton/HiveDelegator.java | 25 +++--
 .../hive/hcatalog/templeton/tool/JobState.java | 13 +
 .../templeton/tool/JobSubmissionConstants.java |  3 ++
 .../hive/hcatalog/templeton/tool/LaunchMapper.java | 23 ++---
 .../hcatalog/templeton/tool/TempletonUtils.java|  6 +++
 .../templeton/tool/TestTempletonUtils.java |  3 ++
 9 files changed, 107 insertions(+), 37 deletions(-)

diff --git a/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm 
b/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm
index 66a6ca1..e62269b 100644
--- a/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm
+++ b/hcatalog/src/test/e2e/templeton/drivers/TestDriverCurl.pm
@@ -555,12 +555,12 @@ sub execCurlCmd(){
 my %result;
 my $out;
 my $err;
-IPC::Run::run(\@curl_cmd, \undef, $out, $err)
+IPC::Run::run(\@curl_cmd, \undef, $log, $log)
 or die "Failed running curl cmd " . join ' ', @curl_cmd;
 
 $result{'rc'} = $? >> 8;
-$result{'stderr'} = $err;
-$result{'stdout'} = $out;
+$result{'stderr'} = $log;
+$result{'stdout'} = $log;
 $result{'body'} = `cat $res_body`;
 
 my @full_header = `cat $res_header`;
diff --git a/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf 
b/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf
index a1b0284..824eb92 100644
--- a/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf
+++ b/hcatalog/src/test/e2e/templeton/tests/jobsubmission.conf
@@ -324,7 +324,7 @@ $cfg =
 #results
  'status_code' => 200,
  'check_job_created' => 1,
- 'check_job_exit_value' => 64,
+ 'check_job_exit_value' => 1,
 
 },
  
@@ -443,7 +443,7 @@ $cfg =
  'num' => 9,
  'method' => 'POST',
  'url' => ':TEMPLETON_URL:/templeton/v1/hive?user.name=:UNAME:',
- 'post_options' => ['execute=add jar piggybank.jar', 
'files=:INPDIR_HDFS:/piggybank.jar',],
+ 'post_options' => ['execute=add jar :INPDIR_HDFS:/piggybank.jar',],
  'json_field_substr_match' => { 'id' => '\d+'},
 #results
  'status_code' => 200,
@@ -499,7 +499,7 @@ $cfg =
 {
 #enable logs
  'num' => 13,
- 'ignore23' => 'Log collector does not work with Hadoop 2',
+ 'ignore' => 'Log collector does not work with Hadoop 2/3',
  'method' => 'POST',
  'url' => ':TEMPLETON_URL:/templeton/v1/hive?user.name=:UNAME:',
  'post_options' => ['execute=select a,b from mynums', 
'statusdir=:OUTDIR:/status', 'enablelog=true'],
diff --git 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/DeleteDelegator.java
 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/DeleteDelegator.java
index 049c9a4..5afd1b9 100644
--- 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/DeleteDelegator.java
+++ 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/DeleteDelegator.java
@@ -19,8 +19,16 @@
 package org.apache.hive.hcatalog.templeton;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Pattern;
 
+import org.apache.commons.io.IOUtils;
+import org.apache.hive.hcatalog.templeton.tool.TempletonUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.shims.HadoopShims.WebHCatJTShim;
@@ -39,6 +47,36 @@ public class DeleteDelegator extends TempletonDelegator {
 super(appConf);
   }
 
+  private String runProgram(String[] cmd) throws IOException, InterruptedException {
+    ProcessBuilder pb = new ProcessBuilder(cmd);
+    Set<String> keys = new HashSet<String>(pb.environment().keySet());
+    for (String key : keys) {
+      pb.environment().remove(key);
+    }
+    Process p = pb.start();
+    String stdout = IOUtils.toString(p.getInputStream());
+    String stderr = IOUtils.toString(p.getErrorStream());
+    int code = p.waitFor();
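(The hunk is cut off here. As a hedged sketch only -- the command, application id, and LOG field are hypothetical and not taken from the patch -- a call site inside DeleteDelegator might look like:)

    // Hypothetical: kill the Hive query's YARN application and log whatever the CLI printed.
    String[] cmd = new String[] {"yarn", "application", "-kill", "application_1550000000000_0001"};
    String output = runProgram(cmd);   // assumes the truncated method returns the captured output
    LOG.info("kill command output: " + output);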

[hive] branch master updated: HIVE-21247: Webhcat beeline in secure mode (Daniel Dai, reviewed by Thejas Nair)

2019-02-26 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 3e9614e  HIVE-21247: Webhcat beeline in secure mode (Daniel Dai, 
reviewed by Thejas Nair)
3e9614e is described below

commit 3e9614eebd19f9843b28d97aaee1c3dfb815fb3d
Author: Daniel Dai 
AuthorDate: Tue Feb 26 16:38:26 2019 -0800

HIVE-21247: Webhcat beeline in secure mode (Daniel Dai, reviewed by Thejas 
Nair)

Signed-off-by: Thejas M Nair 
---
 .../src/java/org/apache/hive/beeline/BeeLine.java  |  4 ++
 hcatalog/webhcat/svr/pom.xml   | 16 +
 .../apache/hive/hcatalog/templeton/AppConfig.java  |  1 +
 .../hive/hcatalog/templeton/HiveDelegator.java |  6 ++
 .../hcatalog/templeton/SecureProxySupport.java |  1 +
 .../hive/hcatalog/templeton/tool/LaunchMapper.java | 20 ++-
 .../templeton/tool/TempletonControllerJob.java | 68 ++
 .../java/org/apache/hive/jdbc/HiveConnection.java  | 26 -
 packaging/src/main/assembly/bin.xml|  3 +
 pom.xml|  1 +
 10 files changed, 132 insertions(+), 14 deletions(-)

diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java 
b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 65eee2c..cded55f 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -1203,6 +1203,10 @@ public class BeeLine implements Closeable {
   if (password != null) {
 
jdbcConnectionParams.getSessionVars().put(JdbcConnectionParams.AUTH_PASSWD, 
password);
   }
+  String auth = cl.getOptionValue("a");
+  if (auth != null) {
+
jdbcConnectionParams.getSessionVars().put(JdbcConnectionParams.AUTH_TYPE, auth);
+  }
   mergedConnectionProperties =
   HS2ConnectionFileUtils.mergeUserConnectionPropertiesAndBeelineSite(
   userConnectionProperties, jdbcConnectionParams);
diff --git a/hcatalog/webhcat/svr/pom.xml b/hcatalog/webhcat/svr/pom.xml
index 4dfade5..36d15cd 100644
--- a/hcatalog/webhcat/svr/pom.xml
+++ b/hcatalog/webhcat/svr/pom.xml
@@ -92,6 +92,12 @@
   com.sun.jersey
   jersey-core
   ${jersey.version}
+  
+
+  javax.ws.rs
+  jsr311-api
+
+  
 
 
   com.sun.jersey
@@ -144,6 +150,11 @@
   ${slf4j.version}
 
 
+  org.apache.hive
+  hive-jdbc
+  ${project.version}
+
+
   org.apache.hadoop
   hadoop-auth
   ${hadoop.version}
@@ -199,6 +210,11 @@
 
   
 
+
+  javax.ws.rs
+  javax.ws.rs-api
+  ${rs-api.version}
+
 
 
   org.apache.hive
diff --git 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
index 1fd9e47..b566cf8 100644
--- 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
+++ 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
@@ -163,6 +163,7 @@ public class AppConfig extends Configuration {
* of escape/unescape methods in {@link org.apache.hadoop.util.StringUtils} 
in webhcat.
*/
   public static final String HIVE_PROPS_NAME = "templeton.hive.properties";
+  public static final String HIVE_SERVER2_URL= "templeton.hive.hs2.url";
   public static final String SQOOP_ARCHIVE_NAME  = "templeton.sqoop.archive";
   public static final String SQOOP_PATH_NAME = "templeton.sqoop.path";
   public static final String SQOOP_HOME_PATH = "templeton.sqoop.home";
diff --git 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
index 3f1968d..3f679ac 100644
--- 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
+++ 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
@@ -28,6 +28,7 @@ import java.util.Map;
 
 import org.apache.commons.exec.ExecuteException;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hive.hcatalog.templeton.tool.JobSubmissionConstants;
 import org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob;
 import org.apache.hive.hcatalog.templeton.tool.TempletonUtils;
@@ -78,6 +79,11 @@ public class HiveDelegator extends LauncherDelegator {
   args.add("-p");
   args.add("default");
 
+  if (UserGroupInformation.isSecurityEnabled()) {
+args.add("-a");
+args.add("delegationToken");

[hive] branch master updated: HIVE-21462: Upgrading SQL server backed metastore when changing data type of a column with constraints (Ashutosh Bapat, reviewed by Daniel Dai)

2019-03-22 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new e157814  HIVE-21462: Upgrading SQL server backed metastore when 
changing data type of a column with constraints (Ashutosh Bapat, reviewed by 
Daniel Dai)
e157814 is described below

commit e15781455aacf729c587b47d89d525d08eafb6b8
Author: Ashutosh Bapat 
AuthorDate: Thu Mar 21 23:34:34 2019 -0700

HIVE-21462: Upgrading SQL server backed metastore when changing data type 
of a column with constraints (Ashutosh Bapat, reviewed by Daniel Dai)

Signed-off-by: Daniel Dai 
---
 .../sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql | 47 --
 .../sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql | 27 -
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql | 30 +-
 .../sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql |  4 +-
 4 files changed, 83 insertions(+), 25 deletions(-)

diff --git 
a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
 
b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
index 64d8fca..b3f985c 100644
--- 
a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
+++ 
b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
@@ -1,14 +1,53 @@
 SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE;
 
+-- We can not change the datatype of a column with default value. Hence we 
first drop the default constraint
+-- and then change the datatype. We wrap the code to drop the default 
constraint in a stored procedure to avoid
+-- code duplicate. We create temporary stored procedures since we do not need 
them during normal
+-- metastore operation.
+CREATE PROCEDURE #DROP_DEFAULT_CONSTRAINT @TBL_NAME sysname, @COL_NAME sysname
+AS
+BEGIN
+   DECLARE @constraintname sysname
+   SELECT @constraintname = default_constraints.name
+   FROM sys.all_columns INNER JOIN sys.tables ON 
all_columns.object_id = tables.object_id
+   INNER JOIN sys.schemas ON tables.schema_id = 
schemas.schema_id
+   INNER JOIN sys.default_constraints ON 
all_columns.default_object_id = default_constraints.object_id
+   WHERE schemas.name = 'dbo' AND tables.name = @TBL_NAME AND 
all_columns.name = @COL_NAME
+
+   IF (@constraintname IS NOT NULL)
+   BEGIN
+   DECLARE @sql nvarchar(max) = 'ALTER TABLE [dbo].' + 
QUOTENAME(@TBL_NAME) + ' DROP CONSTRAINT ' + QUOTENAME(@constraintname)
+   EXEC(@sql)
+   END
+END;
+
+-- Similarly for primary key constraint
+CREATE PROCEDURE #DROP_PRIMARY_KEY_CONSTRAINT @TBL_NAME sysname
+AS
+BEGIN
+   DECLARE @constraintname sysname
+   SELECT @constraintname = constraint_name
+   FROM information_schema.table_constraints
+   WHERE constraint_type = 'PRIMARY KEY' AND table_schema = 'dbo' 
AND table_name = @TBL_NAME
+   IF @constraintname IS NOT NULL
+   BEGIN
+   DECLARE @sql_pk nvarchar(max) = 'ALTER TABLE [dbo].' + 
QUOTENAME(@TBL_NAME) + ' DROP CONSTRAINT ' + @constraintname
+   EXEC(@sql_pk)
+   end
+END;
+
 --:r 022-HIVE-14496.mssql.sql
-ALTER TABLE TBLS ADD IS_REWRITE_ENABLED bit NOT NULL DEFAULT(0);
+ALTER TABLE TBLS ADD IS_REWRITE_ENABLED bit NOT NULL CONSTRAINT 
DEFAULT_IS_REWRITE_ENABLED DEFAULT(0);
 
 --:r 023-HIVE-10562.mssql.sql
 ALTER TABLE NOTIFICATION_LOG ADD MESSAGE_FORMAT nvarchar(16);
 
 --:r 024-HIVE-12274.mssql.sql
+EXEC #DROP_DEFAULT_CONSTRAINT "SERDE_PARAMS", "PARAM_VALUE";
 ALTER TABLE "SERDE_PARAMS" ALTER COLUMN "PARAM_VALUE" nvarchar(MAX);
+EXEC #DROP_DEFAULT_CONSTRAINT "TABLE_PARAMS", "PARAM_VALUE";
 ALTER TABLE "TABLE_PARAMS" ALTER COLUMN "PARAM_VALUE" nvarchar(MAX);
+EXEC #DROP_DEFAULT_CONSTRAINT "SD_PARAMS", "PARAM_VALUE";
 ALTER TABLE "SD_PARAMS" ALTER COLUMN "PARAM_VALUE" nvarchar(MAX);
 
 ALTER TABLE "TBLS" ALTER COLUMN "TBL_NAME" nvarchar(256);
@@ -20,8 +59,10 @@ ALTER TABLE "COMPLETED_TXN_COMPONENTS" ALTER COLUMN 
"CTC_TABLE" nvarchar(256);
 
 
 -- A number of indices and constraints reference COLUMN_NAME.  These have to 
be dropped before the not null constraint
--- can be added.
-ALTER TABLE COLUMNS_V2 DROP CONSTRAINT COLUMNS_PK;
+-- can be added. Earlier versions may not have created named constraints, so 
use IF EXISTS and also
+-- the stored procedure.
+ALTER TABLE COLUMNS_V2 DROP CONSTRAINT IF EXISTS COLUMNS_PK;
+EXEC #DROP_PRIMARY_KEY_CONSTRAINT COLUMNS_V2;
 DROP INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS;
 DROP INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS;
 DROP INDEX P

[hive] branch master updated: HIVE-21573: Binary transport shall ignore principal if auth is set to delegationToken (Denes Bodo, reviewed by Zoltan Haindrich, Daniel Dai)

2019-04-08 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new a3d826d  HIVE-21573: Binary transport shall ignore principal if auth 
is set to delegationToken (Denes Bodo, reviewed by Zoltan Haindrich, Daniel Dai)
a3d826d is described below

commit a3d826d54cfaedf066c430a4f83f2b04a402c3da
Author: Denes Bodo 
AuthorDate: Mon Apr 8 14:54:50 2019 -0700

HIVE-21573: Binary transport shall ignore principal if auth is set to 
delegationToken (Denes Bodo, reviewed by Zoltan Haindrich, Daniel Dai)

Signed-off-by: Daniel Dai 
---
 .../java/org/apache/hive/jdbc/HiveConnection.java  | 31 +++---
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java 
b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index 4c7119f..ec9c193 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -685,23 +685,24 @@ public class HiveConnection implements 
java.sql.Connection {
   saslProps.put(Sasl.QOP, "auth-conf,auth-int,auth");
 }
 saslProps.put(Sasl.SERVER_AUTH, "true");
-if (sessConfMap.containsKey(JdbcConnectionParams.AUTH_PRINCIPAL)) {
+String tokenStr = null;
+if 
(JdbcConnectionParams.AUTH_TOKEN.equals(sessConfMap.get(JdbcConnectionParams.AUTH_TYPE)))
 {
+  // If there's a delegation token available then use token based 
connection
+  tokenStr = getClientDelegationToken(sessConfMap);
+}
+if (tokenStr != null) {
+  transport = KerberosSaslHelper.getTokenTransport(tokenStr,
+  host, socketTransport, saslProps);
+} else 
if(sessConfMap.containsKey(JdbcConnectionParams.AUTH_PRINCIPAL)){
   transport = KerberosSaslHelper.getKerberosTransport(
-  sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL), host,
-  socketTransport, saslProps, assumeSubject);
+  sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL), host,
+  socketTransport, saslProps, assumeSubject);
 } else {
-  // If there's a delegation token available then use token based 
connection
-  String tokenStr = getClientDelegationToken(sessConfMap);
-  if (tokenStr != null) {
-transport = KerberosSaslHelper.getTokenTransport(tokenStr,
-host, socketTransport, saslProps);
-  } else {
-// we are using PLAIN Sasl connection with user/password
-String userName = getUserName();
-String passwd = getPassword();
-// Overlay the SASL transport on top of the base socket transport 
(SSL or non-SSL)
-transport = PlainSaslHelper.getPlainTransport(userName, passwd, 
socketTransport);
-  }
+  // we are using PLAIN Sasl connection with user/password
+  String userName = getUserName();
+  String passwd = getPassword();
+  // Overlay the SASL transport on top of the base socket transport 
(SSL or non-SSL)
+  transport = PlainSaslHelper.getPlainTransport(userName, passwd, 
socketTransport);
 }
   } else {
 // Raw socket connection (non-sasl)
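
For context, a minimal self-contained sketch (not part of the patch) of the selection order the new code establishes: a delegation token wins whenever auth is set to delegationToken and a token is actually available, a configured Kerberos principal is used otherwise, and PLAIN user/password is the fallback. The string keys "auth" and "principal" below are illustrative stand-ins for the JdbcConnectionParams constants used in HiveConnection.

import java.util.HashMap;
import java.util.Map;

public class TransportChoice {

  enum Choice { TOKEN, KERBEROS, PLAIN }

  static Choice choose(Map<String, String> sessConf, String delegationToken) {
    // Token-based connection wins when auth is explicitly delegationToken and a
    // token is present, even if a principal is also configured (the HIVE-21573 change).
    if ("delegationToken".equals(sessConf.get("auth")) && delegationToken != null) {
      return Choice.TOKEN;
    }
    // Otherwise a configured principal selects the Kerberos transport.
    if (sessConf.containsKey("principal")) {
      return Choice.KERBEROS;
    }
    // Fall back to PLAIN SASL with user/password.
    return Choice.PLAIN;
  }

  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<>();
    conf.put("auth", "delegationToken");
    conf.put("principal", "hive/_HOST@EXAMPLE.COM");
    // Before the patch a principal always forced Kerberos; now the token is preferred.
    System.out.println(choose(conf, "token-bytes"));  // TOKEN
    System.out.println(choose(conf, null));           // KERBEROS (no token found)
  }
}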



[hive] branch master updated: HIVE-21753: HiveMetastore authorization to enable use of HiveAuthorizer implementation (Ramesh Mani, reviewed by Daniel Dai)

2019-06-02 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 10b6d70  HIVE-21753: HiveMetastore authorization to enable use of 
HiveAuthorizer implementation (Ramesh Mani, reviewed by Daniel Dai)
10b6d70 is described below

commit 10b6d70da1442cccf533bc97f56a622ec9f39661
Author: Daniel Dai 
AuthorDate: Sun Jun 2 14:29:43 2019 -0700

HIVE-21753: HiveMetastore authorization to enable use of HiveAuthorizer 
implementation (Ramesh Mani, reviewed by Daniel Dai)
---
 .../plugin/HiveAuthzSessionContext.java|   2 +-
 .../plugin/fallback/FallbackHiveAuthorizer.java|   2 +-
 .../metastore/HiveMetaStoreAuthorizableEvent.java  |  70 +
 .../plugin/metastore/HiveMetaStoreAuthorizer.java  | 316 +
 .../plugin/metastore/HiveMetaStoreAuthzInfo.java   | 107 +++
 .../plugin/metastore/events/AddPartitionEvent.java | 112 
 .../metastore/events/AlterDatabaseEvent.java   | 111 
 .../metastore/events/AlterPartitionEvent.java  | 111 
 .../plugin/metastore/events/AlterTableEvent.java   | 119 
 .../metastore/events/CreateDatabaseEvent.java  |  97 +++
 .../plugin/metastore/events/CreateTableEvent.java  |  95 +++
 .../plugin/metastore/events/DropDatabaseEvent.java |  90 ++
 .../metastore/events/DropPartitionEvent.java   |  98 +++
 .../plugin/metastore/events/DropTableEvent.java|  86 ++
 .../metastore/events/LoadPartitionDoneEvent.java   |  74 +
 .../plugin/metastore/DummyHiveAuthorizer.java  |  72 +
 .../metastore/DummyHiveAuthorizerFactory.java  |  40 +++
 .../metastore/TestHiveMetaStoreAuthorizer.java | 266 +
 18 files changed, 1866 insertions(+), 2 deletions(-)

diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzSessionContext.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzSessionContext.java
index a26febf..30b069e 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzSessionContext.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzSessionContext.java
@@ -29,7 +29,7 @@ import 
org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving;
 public final class HiveAuthzSessionContext {
 
   public enum CLIENT_TYPE {
-HIVESERVER2, HIVECLI
+HIVESERVER2, HIVECLI, HIVEMETASTORE, OTHER
   };
 
   public static class Builder {
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/fallback/FallbackHiveAuthorizer.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/fallback/FallbackHiveAuthorizer.java
index 10cf4d4..744241f 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/fallback/FallbackHiveAuthorizer.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/fallback/FallbackHiveAuthorizer.java
@@ -51,7 +51,7 @@ public class FallbackHiveAuthorizer extends 
AbstractHiveAuthorizer {
   private final HiveAuthenticationProvider authenticator;
   private String[] admins = null;
 
-  FallbackHiveAuthorizer(HiveConf hiveConf, HiveAuthenticationProvider 
hiveAuthenticator,
+  public FallbackHiveAuthorizer(HiveConf hiveConf, HiveAuthenticationProvider 
hiveAuthenticator,
 HiveAuthzSessionContext ctx) {
 this.authenticator = hiveAuthenticator;
 this.sessionCtx = applyTestSettings(ctx, hiveConf);
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizableEvent.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizableEvent.java
new file mode 100644
index 000..d3d475a
--- /dev/null
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizableEvent.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the Lice
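
The listing above is cut off by the archive. As background, HiveMetaStoreAuthorizer plugs into the metastore as a pre-event listener; the skeleton below is an illustrative listener (not the committed class) showing only the veto mechanism, assuming the usual MetaStorePreEventListener/PreEventContext API and that the class would be registered through the metastore pre-event listener setting (e.g. hive.metastore.pre.event.listeners).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStorePreEventListener;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.events.PreEventContext;

// Skeleton of a metastore-side authorizer: a pre-event listener that vetoes
// operations by throwing MetaException before the metastore applies them.
public class DenyAllDropsAuthorizer extends MetaStorePreEventListener {

  public DenyAllDropsAuthorizer(Configuration config) {
    super(config);
  }

  @Override
  public void onEvent(PreEventContext context)
      throws MetaException, NoSuchObjectException, InvalidOperationException {
    switch (context.getEventType()) {
      case DROP_TABLE:
      case DROP_DATABASE:
        // A real implementation would build an authorizable event and consult a
        // HiveAuthorizer plugin; here the operation is simply rejected.
        throw new MetaException("Operation " + context.getEventType() + " is not permitted");
      default:
        break;
    }
  }
}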

[hive] branch master updated: HIVE-21829: HiveMetaStore authorization issue with AlterTable and DropTable events (Ramesh Mani, reviewed by Daniel Dai)

2019-06-04 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 85eeadb  HIVE-21829: HiveMetaStore authorization issue with AlterTable 
and DropTable events (Ramesh Mani, reviewed by Daniel Dai)
85eeadb is described below

commit 85eeadb49c2be2209206150bc959bd693ad7ed94
Author: Daniel Dai 
AuthorDate: Tue Jun 4 11:01:21 2019 -0700

HIVE-21829: HiveMetaStore authorization issue with AlterTable and DropTable 
events (Ramesh Mani, reviewed by Daniel Dai)
---
 .../plugin/metastore/HiveMetaStoreAuthorizer.java  |  4 ++--
 .../metastore/TestHiveMetaStoreAuthorizer.java | 26 +++---
 2 files changed, 25 insertions(+), 5 deletions(-)

diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizer.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizer.java
index 50c7fc6..434d1c9 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizer.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizer.java
@@ -145,13 +145,13 @@ public class HiveMetaStoreAuthorizer extends 
MetaStorePreEventListener {
   }
   break;
 case ALTER_TABLE:
-  authzEvent = new CreateTableEvent(preEventContext);
+  authzEvent = new AlterTableEvent(preEventContext);
   if (isViewOperation(preEventContext) && 
(!isSuperUser(getCurrentUser(authzEvent {
 throw new MetaException(getErrorMessage("ALTER_VIEW", 
getCurrentUser(authzEvent)));
   }
   break;
 case DROP_TABLE:
-  authzEvent = new CreateTableEvent(preEventContext);
+  authzEvent = new DropTableEvent(preEventContext);
   if (isViewOperation(preEventContext) && 
(!isSuperUser(getCurrentUser(authzEvent {
 throw new MetaException(getErrorMessage("DROP_VIEW", 
getCurrentUser(authzEvent)));
   }
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/TestHiveMetaStoreAuthorizer.java
 
b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/TestHiveMetaStoreAuthorizer.java
index 9bbc70e..b9c0dcc 100644
--- 
a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/TestHiveMetaStoreAuthorizer.java
+++ 
b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/TestHiveMetaStoreAuthorizer.java
@@ -235,7 +235,27 @@ public class TestHiveMetaStoreAuthorizer {
   }
 
   @Test
-  public void testJ_DropTable_authorizedUser() throws Exception {
+  public void testJ_AlterTable_AuthorizedUser() throws Exception {
+
UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(authorizedUser));
+try {
+  Table table = new TableBuilder()
+  .setTableName(tblName)
+  .addCol("name", ColumnType.STRING_TYPE_NAME)
+  .setOwner(authorizedUser)
+  .build(conf);
+  hmsHandler.create_table(table);
+
+  Table alteredTable = new TableBuilder()
+  .addCol("dep", ColumnType.STRING_TYPE_NAME)
+  .build(conf);
+  hmsHandler.alter_table("default",tblName,alteredTable);
+} catch (Exception e) {
+  // No Exception for create table for authorized user
+}
+  }
+
+  @Test
+  public void testK_DropTable_authorizedUser() throws Exception {
 
UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(authorizedUser));
 try {
   hmsHandler.drop_table(dbName,tblName,true);
@@ -245,7 +265,7 @@ public class TestHiveMetaStoreAuthorizer {
   }
 
   @Test
-  public void testK_DropDatabase_authorizedUser() throws Exception {
+  public void testL_DropDatabase_authorizedUser() throws Exception {
 
UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(authorizedUser));
 try {
   hmsHandler.drop_database(dbName,true,true);
@@ -255,7 +275,7 @@ public class TestHiveMetaStoreAuthorizer {
   }
 
   @Test
-  public void testL_DropCatalog_SuperUser() throws Exception {
+  public void testM_DropCatalog_SuperUser() throws Exception {
 
UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(superUser));
 try {
   hmsHandler.drop_catalog(new DropCatalogRequest(catalogName));



[hive] branch master updated: HIVE-21833: Ranger Authorization in Hive based on object ownership (Sam An, reviewed by Daniel Dai)

2019-06-17 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new fcadd38  HIVE-21833: Ranger Authorization in Hive based on object 
ownership (Sam An, reviewed by Daniel Dai)
fcadd38 is described below

commit fcadd388df2a7c12378febab167ee75a36520f6e
Author: Daniel Dai 
AuthorDate: Mon Jun 17 16:07:48 2019 -0700

HIVE-21833: Ranger Authorization in Hive based on object ownership (Sam An, 
reviewed by Daniel Dai)
---
 ql/src/java/org/apache/hadoop/hive/ql/Driver.java  |   9 +-
 .../authorization/plugin/HivePrivilegeObject.java  |  41 -
 .../TestHivePrivilegeObjectOwnerNameAndType.java   | 170 +
 .../hadoop/hive/metastore/cache/CachedStore.java   |   4 +-
 4 files changed, 217 insertions(+), 7 deletions(-)

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java 
b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 255c65a..18438aa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.LockComponent;
 import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.Schema;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.cache.results.CacheUsage;
@@ -1395,15 +1396,21 @@ public class Driver implements IDriver {
   List partKeys = null;
   List columns = null;
   String className = null;
+  String ownerName = null;
+  PrincipalType ownerType = null;
   switch(privObject.getType()){
   case DATABASE:
 dbname = privObject.getDatabase().getName();
+ownerName = privObject.getDatabase().getOwnerName();
+ownerType = privObject.getDatabase().getOwnerType();
 break;
   case TABLE:
 dbname = privObject.getTable().getDbName();
 objName = privObject.getTable().getTableName();
 columns = tableName2Cols == null ? null :
 tableName2Cols.get(Table.getCompleteName(dbname, objName));
+ownerName = privObject.getTable().getOwner();
+ownerType = privObject.getTable().getOwnerType();
 break;
   case DFS_DIR:
   case LOCAL_DIR:
@@ -1428,7 +1435,7 @@ public class Driver implements IDriver {
   }
   HivePrivObjectActionType actionType = 
AuthorizationUtils.getActionType(privObject);
   HivePrivilegeObject hPrivObject = new HivePrivilegeObject(privObjType, 
dbname, objName,
-  partKeys, columns, actionType, null, className);
+  partKeys, columns, actionType, null, className, ownerName, 
ownerType);
   hivePrivobjs.add(hPrivObject);
 }
 return hivePrivobjs;
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
index 87d2e68..b08f7ca 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
@@ -24,6 +24,7 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import 
org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
 
 /**
  * Represents the object on which privilege is being granted/revoked, and 
objects
@@ -65,6 +66,16 @@ public class HivePrivilegeObject implements 
Comparable {
   (o.className != null ? className.compareTo(o.className) : 1) :
   (o.className != null ? -1 : 0);
 }
+if (compare == 0) {
+  compare = ownerName != null?
+  (o.ownerName != null ? ownerName.compareTo(o.ownerName) : 1) :
+  (o.ownerName != null ? -1 : 0);
+}
+if (compare == 0) {
+  compare = ownerType != null?
+  (o.ownerType != null ? ownerType.compareTo(o.ownerType) : 1) :
+  (o.ownerType != null ? -1 : 0);
+}
 
 return compare;
   }
@@ -118,6 +129,8 @@ public class HivePrivilegeObject implements 
Comparable {
   private final List columns;
   private final HivePrivObjectActionType actionType;
   private final String className;
+  private final String ownerName;
+  private final PrincipalType ownerType;
   // cellValueTransformers is corresponding to the columns.
   // Its size should be the same as columns.
   // For example, if a table has two columns, "key" and "value"
@@ -164,9 +177,14 @@ public class HivePrivilegeObject im
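
The diff is truncated here by the archive. The owner-aware ordering added to compareTo above can be restated with a null-safe comparator; the standalone sketch below (plain String stands in for PrincipalType) shows that null owners sort before non-null ones, matching the ternary chain in the patch.

import java.util.Comparator;

public class OwnerCompareSketch {

  // nullsFirst gives the same ordering as the patch: null < non-null, otherwise natural order.
  static final Comparator<String> NULLS_FIRST =
      Comparator.nullsFirst(Comparator.<String>naturalOrder());

  static int compareOwners(String ownerA, String typeA, String ownerB, String typeB) {
    int c = NULLS_FIRST.compare(ownerA, ownerB);
    if (c != 0) {
      return c;
    }
    return NULLS_FIRST.compare(typeA, typeB);
  }

  public static void main(String[] args) {
    System.out.println(compareOwners(null, null, "hive", "USER"));      // negative: null sorts first
    System.out.println(compareOwners("hive", "USER", "hive", "USER"));  // 0: equal owners
  }
}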

[hive] branch master updated: HIVE-21902: HiveServer2 UI: jetty response header needs X-Frame-Options (Rajkumar Singh, reviewed by Daniel Dai)

2019-06-25 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 3d21614  HIVE-21902: HiveServer2 UI: jetty response header needs 
X-Frame-Options (Rajkumar Singh, reviewed by Daniel Dai)
3d21614 is described below

commit 3d2161425cea2bdebe0ef233ac4f46ba7d75e498
Author: Daniel Dai 
AuthorDate: Tue Jun 25 14:02:37 2019 -0700

HIVE-21902: HiveServer2 UI: jetty response header needs X-Frame-Options 
(Rajkumar Singh, reviewed by Daniel Dai)
---
 .../java/org/apache/hadoop/hive/conf/HiveConf.java |   4 +
 .../src/java/org/apache/hive/http/HttpServer.java  | 289 -
 .../apache/hive/service/server/HiveServer2.java|   3 +
 .../hive/service/server/TestHS2HttpServer.java |  13 +
 4 files changed, 306 insertions(+), 3 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 600b187..0215d09 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3283,6 +3283,10 @@ public class HiveConf extends Configuration {
 
HIVE_SERVER2_WEBUI_CORS_ALLOWED_HEADERS("hive.server2.webui.cors.allowed.headers",
   "X-Requested-With,Content-Type,Accept,Origin",
   "Comma separated list of http headers that are allowed when CORS is 
enabled.\n"),
+HIVE_SERVER2_WEBUI_XFRAME_ENABLED("hive.server2.webui.xframe.enabled", 
true,
+"Whether to enable xframe\n"),
+HIVE_SERVER2_WEBUI_XFRAME_VALUE("hive.server2.webui.xframe.value", 
"SAMEORIGIN",
+"Configuration to allow the user to set the x_frame-options 
value\n"),
 
 
 // Tez session settings
diff --git a/common/src/java/org/apache/hive/http/HttpServer.java 
b/common/src/java/org/apache/hive/http/HttpServer.java
index 24c5422..bbcc67e 100644
--- a/common/src/java/org/apache/hive/http/HttpServer.java
+++ b/common/src/java/org/apache/hive/http/HttpServer.java
@@ -25,15 +25,25 @@ import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.Collections;
+import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
 import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
 
 import com.google.common.base.Preconditions;
@@ -44,6 +54,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.http.HtmlQuoting;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
@@ -100,6 +111,20 @@ public class HttpServer {
 
   public static final String CONF_CONTEXT_ATTRIBUTE = "hive.conf";
   public static final String ADMINS_ACL = "admins.acl";
+  private XFrameOption xFrameOption;
+  private boolean xFrameOptionIsEnabled;
+  public static final String HTTP_HEADER_PREFIX = "hadoop.http.header.";
+  private static final String X_FRAME_OPTIONS = "X-FRAME-OPTIONS";
+  static final String X_XSS_PROTECTION  =
+  "X-XSS-Protection:1; mode=block";
+  static final String X_CONTENT_TYPE_OPTIONS =
+  "X-Content-Type-Options:nosniff";
+  private static final String HTTP_HEADER_REGEX =
+  "hadoop\\.http\\.header\\.([a-zA-Z\\-_]+)";
+  private static final Pattern PATTERN_HTTP_HEADER_REGEX =
+  Pattern.compile(HTTP_HEADER_REGEX);
+
+
 
   private final String name;
   private String appDir;
@@ -111,7 +136,8 @@ public class HttpServer {
*/
   private HttpServer(final Builder b) throws IOException {
 this.name = b.name;
-
+this.xFrameOptionIsEnabled = b.xFrameEnabled;
+this.xFrameOption = b.xFrameOption;
 createWebServer(b);
   }
 
@@ -135,6 +161,8 @@ public class HttpServer {
 private String allowedHeaders;
 private PamAuthenticator pamAuthenticator;
 private String contextRootRewriteTarget = "/index.html";
+private boolean xFrameEnabled;
+private XFrameOption xFrameOption = XFrameOp
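
The HttpServer diff is truncated here. To illustrate the mechanism the patch wires up, the sketch below is a generic servlet filter that stamps the same response headers; the filter class and its init parameter are illustrative, and only the header names and the SAMEORIGIN default mirror the patch.

import java.io.IOException;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponse;

public class SecurityHeaderFilter implements Filter {

  // Default matches hive.server2.webui.xframe.value's default in the patch.
  private String xFrameValue = "SAMEORIGIN";

  @Override
  public void init(FilterConfig filterConfig) throws ServletException {
    String configured = filterConfig.getInitParameter("xframe.value");
    if (configured != null) {
      xFrameValue = configured;
    }
  }

  @Override
  public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
      throws IOException, ServletException {
    HttpServletResponse httpResponse = (HttpServletResponse) response;
    // Same headers the HS2 web UI starts emitting with this change.
    httpResponse.setHeader("X-FRAME-OPTIONS", xFrameValue);
    httpResponse.setHeader("X-XSS-Protection", "1; mode=block");
    httpResponse.setHeader("X-Content-Type-Options", "nosniff");
    chain.doFilter(request, response);
  }

  @Override
  public void destroy() {
  }
}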

[hive] branch master updated: HIVE-21625: Fix TxnIdUtils.checkEquivalentWriteIds, also provides a comparison method (Daniel Dai, reviewed by Jason Dere)

2019-05-06 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new a20ffca  HIVE-21625: Fix TxnIdUtils.checkEquivalentWriteIds, also 
provides a comparison method (Daniel Dai, reviewed by Jason Dere)
a20ffca is described below

commit a20ffcaf6d9c5639402f003236398a06e2177924
Author: Daniel Dai 
AuthorDate: Mon May 6 21:35:09 2019 -0700

HIVE-21625: Fix TxnIdUtils.checkEquivalentWriteIds, also provides a 
comparison method (Daniel Dai, reviewed by Jason Dere)
---
 .../org/apache/hive/common/util/TxnIdUtils.java| 95 --
 .../apache/hive/common/util/TestTxnIdUtils.java| 77 ++
 2 files changed, 128 insertions(+), 44 deletions(-)

diff --git a/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java 
b/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
index 4b3cb7d..bd972d4 100644
--- a/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
+++ b/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
@@ -19,62 +19,69 @@ package org.apache.hive.common.util;
 
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 
-import java.util.*;
-
 public class TxnIdUtils {
 
   /**
* Check if 2 ValidWriteIdLists are at an equivalent commit point.
*/
   public static boolean checkEquivalentWriteIds(ValidWriteIdList a, 
ValidWriteIdList b) {
+return compare(a, b) == 0;
+  }
+
+  /*** Compare the freshness of two ValidWriteIdList
+   * @param a
+   * @param b
+   * @return 0, if a and b are equivalent
+   * 1, if a is more recent
+   * -1, if b is more recent
+   ***/
+  public static int compare(ValidWriteIdList a, ValidWriteIdList b) {
 if (!a.getTableName().equalsIgnoreCase(b.getTableName())) {
-  return false;
+  return 
a.getTableName().toLowerCase().compareTo(b.getTableName().toLowerCase());
 }
-ValidWriteIdList newer = a;
-ValidWriteIdList older = b;
-if (a.getHighWatermark() < b.getHighWatermark()) {
-  newer = b;
-  older = a;
+// The algorithm assumes invalidWriteIds are sorted and values are less or 
equal than hwm, here is how
+// the algorithm works:
+// 1. Compare two invalidWriteIds until one the list ends, difference 
means the mismatch writeid is
+//committed in one ValidWriteIdList but not the other, the comparison 
end
+// 2. Every writeid from the last writeid in the short invalidWriteIds 
till its hwm should be committed
+//in the other ValidWriteIdList, otherwise the comparison end
+// 3. Every writeid from lower hwm to higher hwm should be invalid, 
otherwise, the comparison end
+int minLen = Math.min(a.getInvalidWriteIds().length, b.getInvalidWriteIds().length);
+for (int i = 0; i < minLen; i++) {
+  if (a.getInvalidWriteIds()[i] != b.getInvalidWriteIds()[i]) {
+    return a.getInvalidWriteIds()[i] > b.getInvalidWriteIds()[i] ? 1 : -1;
+  }
 }
-
-return checkEquivalentCommittedIds(
-older.getHighWatermark(), older.getInvalidWriteIds(),
-newer.getHighWatermark(), newer.getInvalidWriteIds());
-  }
-
-  /**
-   * Check the min open ID/highwater mark/exceptions list to see if 2 ID lists 
are at the same commit point.
-   * This can also be used for ValidTxnList as well as ValidWriteIdList.
-   */
-  private static boolean checkEquivalentCommittedIds(
-  long oldHWM, long[] oldInvalidIds,
-  long newHWM, long[] newInvalidIds) {
-
-// There should be no valid txns in newer list that are not also in older.
-// - All values in oldInvalidIds should also be in newInvalidIds.
-// - if oldHWM < newHWM, then all IDs between oldHWM .. newHWM should 
exist in newInvalidTxns.
-//   A Gap in the sequence means a committed txn in newer list (lists are 
not equivalent)
-
-if (newInvalidIds.length < oldInvalidIds.length) {
-  return false;
+if (a.getInvalidWriteIds().length == b.getInvalidWriteIds().length) {
+  return Long.signum(a.getHighWatermark() - b.getHighWatermark());
 }
-
-// Check that the values in the older list are also in newer. Lists should 
already be sorted.
-for (int idx = 0; idx < oldInvalidIds.length; ++idx) {
-  if (oldInvalidIds[idx] != newInvalidIds[idx]) {
-return false;
+if (a.getInvalidWriteIds().length == minLen) {
+  if (a.getHighWatermark() != b.getInvalidWriteIds()[minLen] -1) {
+return Long.signum(a.getHighWatermark() - 
(b.getInvalidWriteIds()[minLen] -1));
+  }
+  if (allInvalidFrom(b.getInvalidWriteIds(), minLen, 
b.getHighWatermark())) {
+return 0;
+  } else {
+return -1;
+  }
+} else {
+  if (b.getHighWatermark() != a.getInvalidWriteIds()[minLen] -1) {
+return Long.signum(b.getHighWatermark() - 
(a.getInvalidWriteIds()[minLen] -1));
+  }
+  if (allInvalidFrom(a.getInvalidWriteIds(), minLen, 
a.getHighWatermark())) {
+return 0;
+  } else {
+
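
The diff is truncated here by the archive. The equivalence rule that compare() implements can be restated without the ValidWriteIdList type; the standalone sketch below follows the three numbered comments in the patch and includes a small worked example (high watermark 10 with 8, 9, 10 still open is equivalent to high watermark 7 with nothing open).

import java.util.Arrays;

public class WriteIdCompareSketch {

  // Returns true when two (highWatermark, sorted invalid-id list) pairs describe
  // the same set of committed write ids.
  static boolean equivalent(long hwmA, long[] invalidA, long hwmB, long[] invalidB) {
    int minLen = Math.min(invalidA.length, invalidB.length);
    // 1. Any difference in the shared prefix means a write id committed on one side only.
    for (int i = 0; i < minLen; i++) {
      if (invalidA[i] != invalidB[i]) {
        return false;
      }
    }
    if (invalidA.length == invalidB.length) {
      return hwmA == hwmB;
    }
    // 2./3. The longer list may only extend the shorter one with consecutive ids that
    // start right after the shorter side's high watermark and run up to its own.
    long shortHwm = invalidA.length == minLen ? hwmA : hwmB;
    long[] longer = invalidA.length == minLen ? invalidB : invalidA;
    long longHwm = invalidA.length == minLen ? hwmB : hwmA;
    long expected = shortHwm + 1;
    for (int i = minLen; i < longer.length; i++) {
      if (longer[i] != expected++) {
        return false;
      }
    }
    return expected - 1 == longHwm;
  }

  public static void main(String[] args) {
    System.out.println(equivalent(10, new long[]{8, 9, 10}, 7, new long[]{}));  // true
    System.out.println(equivalent(10, new long[]{9, 10}, 7, new long[]{}));     // false: 8 committed on one side
  }
}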

[hive] branch master updated: HIVE-20615: CachedStore: Background refresh thread bug fixes (Vaibhav Gumashta, reviewed by Daniel Dai)

2019-04-19 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new b58d50c  HIVE-20615: CachedStore: Background refresh thread bug fixes 
(Vaibhav Gumashta, reviewed by Daniel Dai)
b58d50c is described below

commit b58d50cb73a1f79a5d079e0a2c5ac33d2efc33a0
Author: Daniel Dai 
AuthorDate: Fri Apr 19 10:23:01 2019 -0700

HIVE-20615: CachedStore: Background refresh thread bug fixes (Vaibhav 
Gumashta, reviewed by Daniel Dai)
---
 .../hadoop/hive/metastore/cache/CacheUtils.java|  4 +++
 .../hadoop/hive/metastore/cache/CachedStore.java   |  4 ++-
 .../hadoop/hive/metastore/cache/SharedCache.java   | 29 ++
 3 files changed, 25 insertions(+), 12 deletions(-)

diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
index 944c813..d50fa13 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
@@ -40,6 +40,10 @@ public class CacheUtils {
 return buildKey(catName.toLowerCase(), dbName.toLowerCase());
   }
 
+  public static String buildDbKeyWithDelimiterSuffix(String catName, String 
dbName) {
+return buildKey(catName.toLowerCase(), dbName.toLowerCase()) + delimit;
+  }
+
   /**
* Builds a key for the partition cache which is concatenation of partition 
values, each value
* separated by a delimiter
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index e366ebd..6ef9a19 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -724,6 +724,7 @@ public class CachedStore implements RawStore, Configurable {
   } else {
 try {
   triggerPreWarm(rawStore);
+  shouldRunPrewarm = false;
 } catch (Exception e) {
   LOG.error("Prewarm failure", e);
   return;
@@ -815,7 +816,6 @@ public class CachedStore implements RawStore, Configurable {
 if (table != null && !table.isSetPartitionKeys()) {
   List colNames = MetaStoreUtils.getColumnNamesForTable(table);
   Deadline.startTimer("getTableColumnStatistics");
-
   ColumnStatistics tableColStats =
   rawStore.getTableColumnStatistics(catName, dbName, tblName, 
colNames);
   Deadline.stopTimer();
@@ -865,7 +865,9 @@ public class CachedStore implements RawStore, Configurable {
   rawStore.getPartitionColumnStatistics(catName, dbName, 
tblName, partNames, colNames);
   Deadline.stopTimer();
   sharedCache.refreshPartitionColStatsInCache(catName, dbName, 
tblName, partitionColStats);
+  Deadline.startTimer("getPartitionsByNames");
   List parts = rawStore.getPartitionsByNames(catName, 
dbName, tblName, partNames);
+  Deadline.stopTimer();
   // Also save partitions for consistency as they have the stats state.
   for (Partition part : parts) {
 sharedCache.alterPartitionInCache(catName, dbName, tblName, 
part.getValues(), part);
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
index 1c23022..60862d4 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
@@ -1317,11 +1317,13 @@ public class SharedCache {
 //in case of retry, ignore second try.
 return;
   }
-  byte[] sdHash = tblWrapper.getSdHash();
-  if (sdHash != null) {
-decrSd(sdHash);
+  if (tblWrapper != null) {
+byte[] sdHash = tblWrapper.getSdHash();
+if (sdHash != null) {
+  decrSd(sdHash);
+}
+isTableCacheDirty.set(true);
   }
-  isTableCacheDirty.set(true);
 } finally {
   cacheLock.writeLock().unlock();
 }
@@ -1438,25 +1440,30 @@ public class SharedCache {
 
   public void refreshTablesInCache(String catName, String dbName, List 
tables) {
 try {
-  cacheLock.writeLock().lock();
   if (isTab

[hive] branch master updated: HIVE-21672: HiveServer2 needs to support sidecar's 'ldap.xml' file (Sam An, reviewed by Daniel Dai)

2019-05-03 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 46f45e3d HIVE-21672: HiveServer2 needs to support sidecar's 'ldap.xml' 
file (Sam An, reviewed by Daniel Dai)
46f45e3d is described below

commit 46f45e3d417eaede320b1088a37721db5901267d
Author: Sam An 
AuthorDate: Fri May 3 14:26:17 2019 -0700

HIVE-21672: HiveServer2 needs to support sidecar's 'ldap.xml' file (Sam An, 
reviewed by Daniel Dai)

Signed-off-by: Daniel Dai 
---
 .../java/org/apache/hadoop/hive/conf/HiveConf.java | 18 ++-
 .../org/apache/hadoop/hive/conf/TestHiveConf.java  | 57 +-
 2 files changed, 73 insertions(+), 2 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 0c2bd1e..049e837 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -4703,7 +4703,11 @@ public class HiveConf extends Configuration {
 "This parameter enables a number of optimizations when running on 
blobstores:\n" +
 "(1) If hive.blobstore.use.blobstore.as.scratchdir is false, force 
the last Hive job to write to the blobstore.\n" +
 "This is a performance optimization that forces the final 
FileSinkOperator to write to the blobstore.\n" +
-"See HIVE-15121 for details.");
+"See HIVE-15121 for details."),
+
+HIVE_ADDITIONAL_CONFIG_FILES("hive.additional.config.files", "",
+"The names of additional config files, such as ldap-site.xml," +
+"spark-site.xml, etc in comma separated list.");
 
 public final String varname;
 public final String altName;
@@ -5474,6 +5478,18 @@ public class HiveConf extends Configuration {
   addResource(hiveServer2SiteUrl);
 }
 
+String val = this.getVar(HiveConf.ConfVars.HIVE_ADDITIONAL_CONFIG_FILES);
+ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+
+if (val != null && !val.isEmpty()) {
+  String[] configFiles = val.split(",");
+  for (String config : configFiles) {
+URL configURL = findConfigFile(classLoader, config, true);
+if (configURL != null) {
+  addResource(configURL);
+}
+  }
+}
 // Overlay the values of any system properties and manual overrides
 applySystemProperties();
 
diff --git a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java 
b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
index 780a708..bf9dee7 100644
--- a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
+++ b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
@@ -18,15 +18,17 @@
 package org.apache.hadoop.hive.conf;
 
 import com.google.common.collect.Lists;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.util.Shell;
 import org.apache.hive.common.util.HiveTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.io.File;
 import java.io.UnsupportedEncodingException;
+import java.net.URL;
 import java.net.URLEncoder;
 import java.util.ArrayList;
 import java.util.concurrent.TimeUnit;
@@ -189,4 +191,57 @@ public class TestHiveConf {
 Assert.assertEquals(URLEncoder.encode(query, "UTF-8"), 
conf.get(ConfVars.HIVEQUERYSTRING.varname));
 Assert.assertEquals(query, conf.getQueryString());
   }
+
+  @Test
+  public void testAdditionalConfigFiles() throws Exception{
+URL url = ClassLoader.getSystemResource("hive-site.xml");
+File fileHiveSite = new File(url.getPath());
+
+String parFolder = fileHiveSite.getParent();
+//back up hive-site.xml
+String bakHiveSiteFileName = parFolder + "/hive-site-bak.xml";
+File fileBakHiveSite = new File(bakHiveSiteFileName);
+FileUtils.copyFile(fileHiveSite, fileBakHiveSite);
+
+String content = FileUtils.readFileToString(fileHiveSite);
+content = content.substring(0, content.lastIndexOf("</configuration>"));
+
+String testHiveSiteString = content + "<property>\n" +
+" <name>HIVE_SERVER2_PLAIN_LDAP_DOMAIN</name>\n" +
+" <value>a.com</value>\n" +
+"</property>\n" +
+"\n" +
+" <property>\n" +
+"   <name>hive.additional.config.files</name>\n" +
+"   <value>ldap-site.xml,other.xml</value>\n" +
+"   <description>additional config dir for Hive to load</description>\n" +
+" </property>\n" +
+"\n" +
+"</configuration>";
+
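
To show how the new hive.additional.config.files hook behaves, the sketch below is a simplified, standalone version of the loading loop added to HiveConf: each listed name is resolved on the classpath and overlaid onto the configuration. It uses a plain Hadoop Configuration and ClassLoader.getResource instead of HiveConf's internal findConfigFile helper, so it is an illustration rather than the committed code.

import java.net.URL;
import org.apache.hadoop.conf.Configuration;

public class AdditionalConfigLoader {

  static void loadAdditionalConfigs(Configuration conf, String commaSeparatedNames) {
    if (commaSeparatedNames == null || commaSeparatedNames.isEmpty()) {
      return;
    }
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    for (String name : commaSeparatedNames.split(",")) {
      URL configURL = classLoader.getResource(name.trim());
      if (configURL != null) {
        conf.addResource(configURL);  // later resources overlay earlier values
      }
    }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // e.g. ldap-site.xml placed next to hive-site.xml on the HiveServer2 classpath
    loadAdditionalConfigs(conf, "ldap-site.xml,other.xml");
    System.out.println(conf);
  }
}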
+   

[hive] branch master updated: HIVE-20613: CachedStore: Add more UT coverage (outside of .q files) (Vaibhav Gumashta, reviewed by Daniel Dai)

2019-05-05 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new c5e6608  HIVE-20613: CachedStore: Add more UT coverage (outside of .q 
files) (Vaibhav Gumashta, reviewed by Daniel Dai)
c5e6608 is described below

commit c5e6608246754380dd0de20be72b850fabe60df8
Author: Daniel Dai 
AuthorDate: Sun May 5 19:57:05 2019 -0700

HIVE-20613: CachedStore: Add more UT coverage (outside of .q files) 
(Vaibhav Gumashta, reviewed by Daniel Dai)
---
 .../hadoop/hive/metastore/cache/CachedStore.java   |  205 +--
 .../hadoop/hive/metastore/cache/SharedCache.java   |   58 +-
 .../hive/metastore/cache/TestCachedStore.java  | 1354 ++--
 3 files changed, 1095 insertions(+), 522 deletions(-)

diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 1fac51e..a5d0c04 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -36,6 +36,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -108,13 +109,18 @@ public class CachedStore implements RawStore, 
Configurable {
   private static long DEFAULT_CACHE_REFRESH_PERIOD = 100;
   // Time after which metastore cache is updated from metastore DB by the 
background update thread
   private static long cacheRefreshPeriodMS = DEFAULT_CACHE_REFRESH_PERIOD;
+  private static int MAX_RETRIES = 10;
+  // This is set to true only after prewarm is complete
   private static AtomicBoolean isCachePrewarmed = new AtomicBoolean(false);
+  // This is set to true only if we were able to cache all the metadata.
+  // We may not be able to cache all metadata if we hit 
CACHED_RAW_STORE_MAX_CACHE_MEMORY limit.
+  private static AtomicBoolean isCachedAllMetadata = new AtomicBoolean(false);
   private static TablesPendingPrewarm tblsPendingPrewarm = new 
TablesPendingPrewarm();
   private RawStore rawStore = null;
   private Configuration conf;
   private static boolean areTxnStatsSupported;
   private PartitionExpressionProxy expressionProxy = null;
-  private static final SharedCache sharedCache = new SharedCache();
+  private static SharedCache sharedCache = new SharedCache();
   private static  boolean canUseEvents = false;
   private static long lastEventId;
 
@@ -196,8 +202,8 @@ public class CachedStore implements RawStore, Configurable {
 MetastoreConf.getSizeVar(conf, 
ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY);
 sharedCache.initialize(maxSharedCacheSizeInBytes);
 if (maxSharedCacheSizeInBytes > 0) {
-  LOG.info("Maximum memory that the cache will use: {} GB",
-  maxSharedCacheSizeInBytes / (1024 * 1024 * 1024));
+  LOG.info("Maximum memory that the cache will use: {} KB",
+  maxSharedCacheSizeInBytes / (1024));
 }
   }
 
@@ -412,8 +418,7 @@ public class CachedStore implements RawStore, Configurable {
   Collection catalogsToCache;
   try {
 catalogsToCache = catalogsToCache(rawStore);
-LOG.info("Going to cache catalogs: "
-+ org.apache.commons.lang.StringUtils.join(catalogsToCache, ", "));
+LOG.info("Going to cache catalogs: " + 
org.apache.commons.lang.StringUtils.join(catalogsToCache, ", "));
 List catalogs = new ArrayList<>(catalogsToCache.size());
 for (String catName : catalogsToCache) {
   catalogs.add(rawStore.getCatalog(catName));
@@ -441,8 +446,7 @@ public class CachedStore implements RawStore, Configurable {
   databases.add(rawStore.getDatabase(catName, dbName));
 } catch (NoSuchObjectException e) {
   // Continue with next database
-  LOG.warn("Failed to cache database "
-  + DatabaseName.getQualified(catName, dbName) + ", moving 
on", e);
+  LOG.warn("Failed to cache database " + 
DatabaseName.getQualified(catName, dbName) + ", moving on", e);
 }
   }
 } catch (MetaException e) {
@@ -450,8 +454,7 @@ public class CachedStore implements RawStore, Configurable {
 }
   }
   sharedCache.populateDatabasesInCache(databases);
-  LOG.info(
-  "Databases cache is now prewarmed. Now adding tables, partitions and 
statistic

[hive] branch master updated: HIVE-20615: CachedStore: Background refresh thread bug fixes (Vaibhav Gumashta, reviewed by Daniel Dai)

2019-05-05 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new aebfaad  HIVE-20615: CachedStore: Background refresh thread bug fixes 
(Vaibhav Gumashta, reviewed by Daniel Dai)
aebfaad is described below

commit aebfaad137226a69a7424c9d377625963e7e4fdd
Author: Daniel Dai 
AuthorDate: Sun May 5 09:56:30 2019 -0700

HIVE-20615: CachedStore: Background refresh thread bug fixes (Vaibhav 
Gumashta, reviewed by Daniel Dai)
---
 .../hadoop/hive/metastore/cache/CacheUtils.java|  4 ++
 .../hadoop/hive/metastore/cache/CachedStore.java   |  7 +++-
 .../hadoop/hive/metastore/cache/SharedCache.java   | 45 +++---
 3 files changed, 41 insertions(+), 15 deletions(-)

diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
index 944c813..d50fa13 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
@@ -40,6 +40,10 @@ public class CacheUtils {
 return buildKey(catName.toLowerCase(), dbName.toLowerCase());
   }
 
+  public static String buildDbKeyWithDelimiterSuffix(String catName, String 
dbName) {
+return buildKey(catName.toLowerCase(), dbName.toLowerCase()) + delimit;
+  }
+
   /**
* Builds a key for the partition cache which is concatenation of partition 
values, each value
* separated by a delimiter
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 2d738f6..1fac51e 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -490,7 +490,7 @@ public class CachedStore implements RawStore, Configurable {
   List partitionColStats = null;
   AggrStats aggrStatsAllPartitions = null;
   AggrStats aggrStatsAllButDefaultPartition = null;
-  if (table.isSetPartitionKeys()) {
+  if (!table.getPartitionKeys().isEmpty()) {
 Deadline.startTimer("getPartitions");
 partitions = rawStore.getPartitions(catName, dbName, tblName, 
Integer.MAX_VALUE);
 Deadline.stopTimer();
@@ -561,6 +561,7 @@ public class CachedStore implements RawStore, Configurable {
 LOG.debug("Processed database: {}. Cached {} / {} databases so far.", 
dbName,
 ++numberOfDatabasesCachedSoFar, databases.size());
   }
+  sharedCache.clearDirtyFlags();
   completePrewarm(startTime);
 }
   }
@@ -724,6 +725,7 @@ public class CachedStore implements RawStore, Configurable {
   } else {
 try {
   triggerPreWarm(rawStore);
+  shouldRunPrewarm = false;
 } catch (Exception e) {
   LOG.error("Prewarm failure", e);
   return;
@@ -815,7 +817,6 @@ public class CachedStore implements RawStore, Configurable {
 if (table != null && !table.isSetPartitionKeys()) {
   List colNames = MetaStoreUtils.getColumnNamesForTable(table);
   Deadline.startTimer("getTableColumnStatistics");
-
   ColumnStatistics tableColStats =
   rawStore.getTableColumnStatistics(catName, dbName, tblName, 
colNames);
   Deadline.stopTimer();
@@ -865,7 +866,9 @@ public class CachedStore implements RawStore, Configurable {
   rawStore.getPartitionColumnStatistics(catName, dbName, 
tblName, partNames, colNames);
   Deadline.stopTimer();
   sharedCache.refreshPartitionColStatsInCache(catName, dbName, 
tblName, partitionColStats);
+  Deadline.startTimer("getPartitionsByNames");
   List parts = rawStore.getPartitionsByNames(catName, 
dbName, tblName, partNames);
+  Deadline.stopTimer();
   // Also save partitions for consistency as they have the stats state.
   for (Partition part : parts) {
 sharedCache.alterPartitionInCache(catName, dbName, tblName, 
part.getValues(), part);
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
index 1c23022..a0636b6 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/

[hive] branch master updated: HIVE-14737: Problem accessing /logs in a Kerberized Hive Server 2 Web UI (Rajkumar Singh, reviewed by Daniel Dai)

2019-06-26 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 22a5e21  HIVE-14737: Problem accessing /logs in a Kerberized Hive 
Server 2 Web UI (Rajkumar Singh, reviewed by Daniel Dai)
22a5e21 is described below

commit 22a5e211df38a5de1cc3d1e6b15c52389aace2e1
Author: Daniel Dai 
AuthorDate: Wed Jun 26 21:05:38 2019 -0700

HIVE-14737: Problem accessing /logs in a Kerberized Hive Server 2 Web UI 
(Rajkumar Singh, reviewed by Daniel Dai)
---
 common/src/java/org/apache/hive/http/HttpServer.java | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/common/src/java/org/apache/hive/http/HttpServer.java 
b/common/src/java/org/apache/hive/http/HttpServer.java
index bbcc67e..8968529 100644
--- a/common/src/java/org/apache/hive/http/HttpServer.java
+++ b/common/src/java/org/apache/hive/http/HttpServer.java
@@ -462,7 +462,7 @@ public class HttpServer {
   /**
* Secure the web server with kerberos (AuthenticationFilter).
*/
-  void setupSpnegoFilter(Builder b) throws IOException {
+  void setupSpnegoFilter(Builder b, ServletContextHandler ctx) throws 
IOException {
 Map params = new HashMap();
 params.put("kerberos.principal",
   SecurityUtil.getServerPrincipal(b.spnegoPrincipal, b.host));
@@ -471,8 +471,7 @@ public class HttpServer {
 FilterHolder holder = new FilterHolder();
 holder.setClassName(AuthenticationFilter.class.getName());
 holder.setInitParameters(params);
-
-ServletHandler handler = webAppContext.getServletHandler();
+ServletHandler handler = ctx.getServletHandler();
 handler.addFilterWithMapping(
   holder, "/*", FilterMapping.ALL);
   }
@@ -565,7 +564,7 @@ public class HttpServer {
 
 if (b.useSPNEGO) {
   // Secure the web server with kerberos
-  setupSpnegoFilter(b);
+  setupSpnegoFilter(b, webAppContext);
 }
 
 if (b.enableCORS) {
@@ -648,6 +647,9 @@ public class HttpServer {
   ServletContextHandler logCtx =
 new ServletContextHandler(contexts, "/logs");
   setContextAttributes(logCtx.getServletContext(), b.contextAttrs);
+  if(b.useSPNEGO) {
+setupSpnegoFilter(b,logCtx);
+  }
   logCtx.addServlet(AdminAuthorizedServlet.class, "/*");
   logCtx.setResourceBase(logDir);
   logCtx.setDisplayName("logs");
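
A standalone sketch of the idea behind the fix: the SPNEGO filter has to be attached to every context (the web app and /logs alike), so the setup takes the target ServletContextHandler as a parameter. The filter class name and the kerberos.keytab parameter below are placeholders; only the Jetty wiring mirrors the patch.

import java.util.HashMap;
import java.util.Map;
import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.servlet.FilterMapping;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHandler;

public class SpnegoFilterSetup {

  static void addAuthFilter(ServletContextHandler ctx, String filterClassName,
      String principal, String keytab) {
    Map<String, String> params = new HashMap<>();
    params.put("kerberos.principal", principal);
    params.put("kerberos.keytab", keytab);

    FilterHolder holder = new FilterHolder();
    holder.setClassName(filterClassName);
    holder.setInitParameters(params);

    // Attach the filter to this specific context instead of only the web app context.
    ServletHandler handler = ctx.getServletHandler();
    handler.addFilterWithMapping(holder, "/*", FilterMapping.ALL);
  }
}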



[hive] branch master updated: HIVE-19831: Hiveserver2 should skip doAuth checks for CREATE DATABASE/TABLE if database/table already exists (Rajkumar Singh, reviewed by Daniel Dai)

2019-07-01 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 2d9e0e4  HIVE-19831: Hiveserver2 should skip doAuth checks for CREATE 
DATABASE/TABLE if database/table already exists (Rajkumar Singh, reviewed by 
Daniel Dai)
2d9e0e4 is described below

commit 2d9e0e41ec6db234b8a759f1c5aff8f79227f158
Author: Daniel Dai 
AuthorDate: Mon Jul 1 11:22:15 2019 -0700

HIVE-19831: Hiveserver2 should skip doAuth checks for CREATE DATABASE/TABLE 
if database/table already exists (Rajkumar Singh, reviewed by Daniel Dai)
---
 ql/src/java/org/apache/hadoop/hive/ql/Driver.java | 9 +
 1 file changed, 9 insertions(+)

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java 
b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index ae622c8..8c764e2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -1088,6 +1088,15 @@ public class Driver implements IDriver {
 additionalInputs.add(new ReadEntity(e.getTable()));
   }
 }
+// skipping the auth check for the "CREATE DATABASE" operation if database 
already exists
+// we know that if the database already exists then "CREATE DATABASE" 
operation will fail.
+if(op.equals(HiveOperation.CREATEDATABASE)){
+  for (WriteEntity e : sem.getOutputs()) {
+if(e.getType() == Entity.Type.DATABASE && 
db.databaseExists(e.getName().split(":")[1])){
+  return;
+}
+  }
+}
 
 Set additionalOutputs = new HashSet();
 for (WriteEntity e : sem.getOutputs()) {



[hive] branch master updated: HIVE-21927: HiveServer Web UI: Setting the HttpOnly option in the cookies (Rajkumar Singh, reviewed by Daniel Dai)

2019-06-28 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new b2a265a  HIVE-21927: HiveServer Web UI: Setting the HttpOnly option in 
the cookies (Rajkumar Singh, reviewed by Daniel Dai)
b2a265a is described below

commit b2a265a94625851afa3d16b8af44d3d04a0f1579
Author: Daniel Dai 
AuthorDate: Fri Jun 28 15:03:38 2019 -0700

HIVE-21927: HiveServer Web UI: Setting the HttpOnly option in the cookies 
(Rajkumar Singh, reviewed by Daniel Dai)
---
 common/src/java/org/apache/hive/http/HttpServer.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/common/src/java/org/apache/hive/http/HttpServer.java 
b/common/src/java/org/apache/hive/http/HttpServer.java
index 8968529..35ab7f8 100644
--- a/common/src/java/org/apache/hive/http/HttpServer.java
+++ b/common/src/java/org/apache/hive/http/HttpServer.java
@@ -453,6 +453,7 @@ public class HttpServer {
   WebAppContext createWebAppContext(Builder b) {
 WebAppContext ctx = new WebAppContext();
 setContextAttributes(ctx.getServletContext(), b.contextAttrs);
+ctx.getServletContext().getSessionCookieConfig().setHttpOnly(true);
 ctx.setDisplayName(b.name);
 ctx.setContextPath("/");
 ctx.setWar(appDir + "/" + b.name);



[hive] branch master updated: HIVE-21986: HiveServer Web UI: Setting the Strict-Transport-Security in default response header (Rajkumar Singh, reviewed by Gopal V)

2019-07-17 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 96dc429  HIVE-21986: HiveServer Web UI: Setting the 
Strict-Transport-Security in default response header (Rajkumar Singh, reviewed 
by Gopal V)
96dc429 is described below

commit 96dc42999619a4c313e769e5335f6fbefb3d9167
Author: Daniel Dai 
AuthorDate: Wed Jul 17 11:41:20 2019 -0700

HIVE-21986: HiveServer Web UI: Setting the Strict-Transport-Security in 
default response header (Rajkumar Singh, reviewed by Gopal V)
---
 common/src/java/org/apache/hive/http/HttpServer.java | 8 
 1 file changed, 8 insertions(+)

diff --git a/common/src/java/org/apache/hive/http/HttpServer.java 
b/common/src/java/org/apache/hive/http/HttpServer.java
index 35ab7f8..b3ce8da 100644
--- a/common/src/java/org/apache/hive/http/HttpServer.java
+++ b/common/src/java/org/apache/hive/http/HttpServer.java
@@ -113,12 +113,15 @@ public class HttpServer {
   public static final String ADMINS_ACL = "admins.acl";
   private XFrameOption xFrameOption;
   private boolean xFrameOptionIsEnabled;
+  private boolean isSSLEnabled;
   public static final String HTTP_HEADER_PREFIX = "hadoop.http.header.";
   private static final String X_FRAME_OPTIONS = "X-FRAME-OPTIONS";
   static final String X_XSS_PROTECTION  =
   "X-XSS-Protection:1; mode=block";
   static final String X_CONTENT_TYPE_OPTIONS =
   "X-Content-Type-Options:nosniff";
+  static final String STRICT_TRANSPORT_SECURITY =
+  "Strict-Transport-Security:max-age=31536000; includeSubDomains";
   private static final String HTTP_HEADER_REGEX =
   "hadoop\\.http\\.header\\.([a-zA-Z\\-_]+)";
   private static final Pattern PATTERN_HTTP_HEADER_REGEX =
@@ -137,6 +140,7 @@ public class HttpServer {
   private HttpServer(final Builder b) throws IOException {
 this.name = b.name;
 this.xFrameOptionIsEnabled = b.xFrameEnabled;
+this.isSSLEnabled = b.useSSL;
 this.xFrameOption = b.xFrameOption;
 createWebServer(b);
   }
@@ -675,6 +679,10 @@ public class HttpServer {
 splitVal = X_XSS_PROTECTION.split(":");
 headers.put(HTTP_HEADER_PREFIX + splitVal[0],
 splitVal[1]);
+if(this.isSSLEnabled){
+  splitVal = STRICT_TRANSPORT_SECURITY.split(":");
+  headers.put(HTTP_HEADER_PREFIX + splitVal[0],splitVal[1]);
+}
 return headers;
   }
 



[hive] branch master updated: HIVE-21571: SHOW COMPACTIONS shows column names as its first output row (Rajkumar Singh, reviewed by Daniel Dai)

2019-07-07 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 67e515f  HIVE-21571: SHOW COMPACTIONS shows column names as its first 
output row (Rajkumar Singh, reviewed by Daniel Dai)
67e515f is described below

commit 67e515ffab2744e451752d750c312876c7e602f1
Author: Daniel Dai 
AuthorDate: Sun Jul 7 20:36:40 2019 -0700

HIVE-21571: SHOW COMPACTIONS shows column names as its first output row 
(Rajkumar Singh, reviewed by Daniel Dai)
---
 .../hadoop/hive/ql/ddl/process/ShowCompactionsOperation.java  | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsOperation.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsOperation.java
index 87419be..4bf45fc 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsOperation.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/ShowCompactionsOperation.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
 import org.apache.hadoop.hive.ql.ddl.DDLUtils;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.session.SessionState;
 
 /**
  * Operation process of showing compactions.
@@ -40,13 +41,16 @@ public class ShowCompactionsOperation extends 
DDLOperation
 
   @Override
   public int execute() throws HiveException {
+SessionState sessionState = SessionState.get();
 // Call the metastore to get the status of all known compactions 
(completed get purged eventually)
 ShowCompactResponse rsp = context.getDb().showCompactions();
 
 // Write the results into the file
 try (DataOutputStream os = DDLUtils.getOutputStream(new 
Path(desc.getResFile()), context)) {
-  // Write a header
-  writeHeader(os);
+  // Write a header for cliDriver
+  if(!sessionState.isHiveServerQuery()) {
+writeHeader(os);
+  }
 
   if (rsp.getCompacts() != null) {
 for (ShowCompactResponseElement e : rsp.getCompacts()) {



[hive] branch master updated: HIVE-27600: Reduce filesystem calls in OrcFileMergeOperator (#4579)

2023-08-19 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new b2433c3ef5f HIVE-27600: Reduce filesystem calls in 
OrcFileMergeOperator (#4579)
b2433c3ef5f is described below

commit b2433c3ef5faf1a69f5e29643a4a8d12c5528934
Author: yigress <104102129+yigr...@users.noreply.github.com>
AuthorDate: Sat Aug 19 22:16:28 2023 -0700

HIVE-27600: Reduce filesystem calls in OrcFileMergeOperator (#4579)
---
 .../hadoop/hive/ql/exec/OrcFileMergeOperator.java  | 34 --
 1 file changed, 12 insertions(+), 22 deletions(-)

diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
index 5fec9c8a1f3..cb538c4a708 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
@@ -25,6 +25,7 @@ import java.util.Map;
 import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.io.orc.Writer;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.orc.TypeDescription;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -60,8 +61,8 @@ public class OrcFileMergeOperator extends
 
   private Map outWriters = new HashMap<>();
   private Path prevPath;
-  private Reader reader;
   private FSDataInputStream fdis;
+  private ObjectInspector obi;
 
   /** Kryo ctor. */
   protected OrcFileMergeOperator() {
@@ -110,10 +111,16 @@ public class OrcFileMergeOperator extends
   if (prevPath == null) {
 prevPath = k.getInputPath();
   }
-  if (reader == null) {
-reader = OrcFile.createReader(fs, k.getInputPath());
-LOG.info("ORC merge file input path: " + k.getInputPath());
+  if (obi == null) {
+Reader reader = OrcFile.createReader(fs, prevPath);
+obi = reader.getObjectInspector();
+try {
+  reader.close();
+} catch (IOException e) {
+  throw new HiveException(String.format("Unable to close reader for 
%s", filePath), e);
+}
   }
+  LOG.info("ORC merge file input path: " + k.getInputPath());
 
   // store the orc configuration from the first file. All other files 
should
   // match this configuration before merging else will not be merged
@@ -133,7 +140,7 @@
             .compress(compression)
             .version(fileVersion)
             .rowIndexStride(rowIndexStride)
-            .inspector(reader.getObjectInspector());
+            .inspector(obi);
         // compression buffer size should only be set if compression is enabled
         if (compression != CompressionKind.NONE) {
           // enforce is required to retain the buffer sizes of old files instead of orc writer
@@ -154,14 +161,6 @@ public class OrcFileMergeOperator extends
         return;
       }
 
-      // next file in the path
-      if (!k.getInputPath().equals(prevPath)) {
-        if (reader != null) {
-          reader.close();
-        }
-        reader = OrcFile.createReader(fs, k.getInputPath());
-      }
-
       // initialize buffer to read the entire stripe
       byte[] buffer = new byte[(int) v.getStripeInformation().getLength()];
       fdis = fs.open(k.getInputPath());
@@ -193,15 +192,6 @@
       if (exception) {
        closeOp(true);
      }
-      if (reader != null) {
-        try {
-          reader.close();
-        } catch (IOException e) {
-          throw new HiveException(String.format("Unable to close reader for %s", filePath), e);
-        } finally {
-          reader = null;
-        }
-      }
      if (fdis != null) {
        try {
          fdis.close();
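
Stepping back from the diff, the change boils down to caching one piece of metadata from the first input file rather than keeping a Reader open (and reopening one) per merged file. A stand-alone sketch of that pattern, with plain java.nio standing in for ORC; readSchema below is a hypothetical stand-in for OrcFile.createReader(...).getObjectInspector() and is not Hive code.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

// Illustration of the caching pattern above: derive the (identical) schema once
// from the first input file and reuse it for every subsequent file in the merge.
public class MergeSchemaCacheSketch {
  private String cachedSchema;   // plays the role of the cached ObjectInspector

  String readSchema(Path file) throws IOException {
    // In the real operator this opens an ORC Reader; here we just read a header line.
    return Files.readAllLines(file).get(0);
  }

  void merge(List<Path> inputs) throws IOException {
    for (Path p : inputs) {
      if (cachedSchema == null) {
        cachedSchema = readSchema(p);   // one open/close for the first file only
      }
      // ... append p's data to the output using cachedSchema ...
    }
  }
}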



[hive] branch master updated: HIVE-27143: Optimize HCatStorer moveTask (#4177)

2023-04-10 Thread daijy
This is an automated email from the ASF dual-hosted git repository.

daijy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
 new 43491dbd75b HIVE-27143: Optimize HCatStorer moveTask (#4177)
43491dbd75b is described below

commit 43491dbd75b83daa755438eb6f43cf6e6b47b1c1
Author: yigress <104102129+yigr...@users.noreply.github.com>
AuthorDate: Mon Apr 10 17:19:38 2023 -0700

HIVE-27143: Optimize HCatStorer moveTask (#4177)

* HIVE-27143: Optimize HCatStorer moveTask

* fix custom dynamic partition
---
 .../mapreduce/FileOutputCommitterContainer.java| 230 +++--
 1 file changed, 123 insertions(+), 107 deletions(-)
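
The diff below is long; as orientation, a minimal sketch of the pattern it adopts, submitting each file move to a thread pool instead of moving task outputs serially (consistent with the ExecutorService/Callable/Future imports the patch adds). The class and method below are illustrative only and are not HCatalog code.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Illustrative only: move every source file into destDir using a bounded
// thread pool, then wait for all moves so failures are not silently dropped.
public class ParallelMoveSketch {
  static void moveAll(List<Path> sources, Path destDir, int threads)
      throws InterruptedException, ExecutionException {
    ExecutorService pool = Executors.newFixedThreadPool(threads);
    try {
      List<Future<Void>> pending = new ArrayList<>();
      for (Path src : sources) {
        Callable<Void> move = () -> {
          Files.move(src, destDir.resolve(src.getFileName()));
          return null;
        };
        pending.add(pool.submit(move));
      }
      for (Future<Void> f : pending) {
        f.get();   // rethrows any IOException wrapped in ExecutionException
      }
    } finally {
      pool.shutdown();
    }
  }
}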

diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
index ef3c1afc457..476c60e53af 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -19,23 +19,34 @@
 
 package org.apache.hive.hcatalog.mapreduce;
 
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-
+import java.util.Queue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.io.HdfsUtils;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -225,6 +236,15 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
     }
   }
 
+  public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() {
+    @Override
+    public boolean accept(Path p) {
+      String name = p.getName();
+      boolean filtered = name.equals(TEMP_DIR_NAME) || name.equals(LOGS_DIR_NAME) || name.equals(SUCCEEDED_FILE_NAME);
+      return !filtered;
+    }
+  };
+
   public static final String SUCCEEDED_FILE_NAME = "_SUCCESS";
   static final String SUCCESSFUL_JOB_OUTPUT_DIR_MARKER =
     "mapreduce.fileoutputcommitter.marksuccessfuljobs";
@@ -367,10 +387,11 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
       partPath = new Path(finalLocn);
     } else {
       partPath = new Path(partLocnRoot);
+      FileSystem partFs = partPath.getFileSystem(context.getConfiguration());
       int i = 0;
       for (FieldSchema partKey : table.getPartitionKeys()) {
         if (i++ != 0) {
-          fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
+          partFs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
           HdfsUtils.setFullFileStatus(conf, status, status.getFileStatus().getGroup(), fs,
               partPath, false);
         }
@@ -380,7 +401,8 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
 
     // Do not need to set the status on the partition directory. We will do it later recursively.
     // See: end of the registerPartitions method
-    fs.mkdirs(partPath);
+    FileSystem partFs = partPath.getFileSystem(context.getConfiguration());
+    partFs.mkdirs(partPath);
 
     // Set the location in the StorageDescriptor
     if (dynamicPartitioningUsed) {
@@ -467,131 +489,129 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
 
   /**
    * Move all of the files from the temp directory to the final location
-   * @param fs the output file system
-   * @param file the file to move
+   * @param srcf the file to move
    * @param srcDir the source directory
    * @param destDir the target directory
-   * @param dryRun - a flag that simply tests if this move would succeed or not based
-   * on whether other files exist where we're trying to copy
+   * @param immutable - whether table is immutable.
    * @throws java.io.IOException
    */
-  private void 
