Repository: spark
Updated Branches:
  refs/heads/master a350bc16d -> 9b8eca65d


http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/golden/merge2-3-10266e3d5dd4c841c0d65030b1edba7c
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/golden/merge2-3-10266e3d5dd4c841c0d65030b1edba7c 
b/sql/hive/src/test/resources/golden/merge2-3-10266e3d5dd4c841c0d65030b1edba7c
new file mode 100644
index 0000000..573541a
--- /dev/null
+++ 
b/sql/hive/src/test/resources/golden/merge2-3-10266e3d5dd4c841c0d65030b1edba7c
@@ -0,0 +1 @@
+0

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/golden/merge2-3-6e53a3ac93113f20db3a12f1dcf30e86
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/golden/merge2-3-6e53a3ac93113f20db3a12f1dcf30e86 
b/sql/hive/src/test/resources/golden/merge2-3-6e53a3ac93113f20db3a12f1dcf30e86
deleted file mode 100644
index 573541a..0000000
--- 
a/sql/hive/src/test/resources/golden/merge2-3-6e53a3ac93113f20db3a12f1dcf30e86
+++ /dev/null
@@ -1 +0,0 @@
-0

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/golden/merge2-4-84967075baa3e56fff2a23f8ab9ba076
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/golden/merge2-4-84967075baa3e56fff2a23f8ab9ba076 
b/sql/hive/src/test/resources/golden/merge2-4-84967075baa3e56fff2a23f8ab9ba076
deleted file mode 100644
index 573541a..0000000
--- 
a/sql/hive/src/test/resources/golden/merge2-4-84967075baa3e56fff2a23f8ab9ba076
+++ /dev/null
@@ -1 +0,0 @@
-0

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/golden/merge2-4-9cbd6d400fb6c3cd09010e3dbd76601
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/golden/merge2-4-9cbd6d400fb6c3cd09010e3dbd76601 
b/sql/hive/src/test/resources/golden/merge2-4-9cbd6d400fb6c3cd09010e3dbd76601
new file mode 100644
index 0000000..573541a
--- /dev/null
+++ 
b/sql/hive/src/test/resources/golden/merge2-4-9cbd6d400fb6c3cd09010e3dbd76601
@@ -0,0 +1 @@
+0

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/golden/merge2-5-1ba2d6f3bb3348da3fee7fab4f283f34
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/golden/merge2-5-1ba2d6f3bb3348da3fee7fab4f283f34 
b/sql/hive/src/test/resources/golden/merge2-5-1ba2d6f3bb3348da3fee7fab4f283f34
new file mode 100644
index 0000000..573541a
--- /dev/null
+++ 
b/sql/hive/src/test/resources/golden/merge2-5-1ba2d6f3bb3348da3fee7fab4f283f34
@@ -0,0 +1 @@
+0

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/golden/merge2-5-2ee5d706fe3a3bcc38b795f6e94970ea
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/golden/merge2-5-2ee5d706fe3a3bcc38b795f6e94970ea 
b/sql/hive/src/test/resources/golden/merge2-5-2ee5d706fe3a3bcc38b795f6e94970ea
deleted file mode 100644
index 573541a..0000000
--- 
a/sql/hive/src/test/resources/golden/merge2-5-2ee5d706fe3a3bcc38b795f6e94970ea
+++ /dev/null
@@ -1 +0,0 @@
-0

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/golden/parallel-0-23a4feaede17467a8cc26e4d86ec30f9
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/golden/parallel-0-23a4feaede17467a8cc26e4d86ec30f9
 
b/sql/hive/src/test/resources/golden/parallel-0-23a4feaede17467a8cc26e4d86ec30f9
deleted file mode 100644
index 573541a..0000000
--- 
a/sql/hive/src/test/resources/golden/parallel-0-23a4feaede17467a8cc26e4d86ec30f9
+++ /dev/null
@@ -1 +0,0 @@
-0

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/golden/parallel-0-6dc30e2de057022e63bd2a645fbec4c2
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/golden/parallel-0-6dc30e2de057022e63bd2a645fbec4c2
 
b/sql/hive/src/test/resources/golden/parallel-0-6dc30e2de057022e63bd2a645fbec4c2
new file mode 100644
index 0000000..573541a
--- /dev/null
+++ 
b/sql/hive/src/test/resources/golden/parallel-0-6dc30e2de057022e63bd2a645fbec4c2
@@ -0,0 +1 @@
+0

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/golden/rcfile_lazydecompress-11-25715870c569b0f8c3d483e3a38b3199
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/golden/rcfile_lazydecompress-11-25715870c569b0f8c3d483e3a38b3199
 
b/sql/hive/src/test/resources/golden/rcfile_lazydecompress-11-25715870c569b0f8c3d483e3a38b3199
new file mode 100644
index 0000000..573541a
--- /dev/null
+++ 
b/sql/hive/src/test/resources/golden/rcfile_lazydecompress-11-25715870c569b0f8c3d483e3a38b3199
@@ -0,0 +1 @@
+0

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/golden/rcfile_lazydecompress-11-3708198aac609695b22e19e89306034c
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/golden/rcfile_lazydecompress-11-3708198aac609695b22e19e89306034c
 
b/sql/hive/src/test/resources/golden/rcfile_lazydecompress-11-3708198aac609695b22e19e89306034c
deleted file mode 100644
index 573541a..0000000
--- 
a/sql/hive/src/test/resources/golden/rcfile_lazydecompress-11-3708198aac609695b22e19e89306034c
+++ /dev/null
@@ -1 +0,0 @@
-0

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/golden/rcfile_lazydecompress-5-68975193b30cb34102b380e647d8d5f4
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/golden/rcfile_lazydecompress-5-68975193b30cb34102b380e647d8d5f4
 
b/sql/hive/src/test/resources/golden/rcfile_lazydecompress-5-68975193b30cb34102b380e647d8d5f4
deleted file mode 100644
index 573541a..0000000
--- 
a/sql/hive/src/test/resources/golden/rcfile_lazydecompress-5-68975193b30cb34102b380e647d8d5f4
+++ /dev/null
@@ -1 +0,0 @@
-0

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/golden/rcfile_lazydecompress-5-dd959af1968381d0ed90178d349b01a7
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/golden/rcfile_lazydecompress-5-dd959af1968381d0ed90178d349b01a7
 
b/sql/hive/src/test/resources/golden/rcfile_lazydecompress-5-dd959af1968381d0ed90178d349b01a7
new file mode 100644
index 0000000..573541a
--- /dev/null
+++ 
b/sql/hive/src/test/resources/golden/rcfile_lazydecompress-5-dd959af1968381d0ed90178d349b01a7
@@ -0,0 +1 @@
+0

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q
index 235b7c1..6a9a20f 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q
@@ -5,7 +5,7 @@ set hive.auto.convert.join = true;
 
 CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE;
 
-set mapred.job.tracker=localhost:58;
+set mapreduce.jobtracker.address=localhost:58;
 set hive.exec.mode.local.auto=true;
 
 explain

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket5.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket5.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket5.q
index 877f8a5..87f6eca 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket5.q
@@ -4,7 +4,7 @@ set hive.enforce.sorting = true;
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles = true;
 set hive.merge.mapredfiles = true;
-set mapred.reduce.tasks = 2;
+set mapreduce.job.reduces = 2;
 
 -- Tests that when a multi insert inserts into a bucketed table and a table 
which is not bucketed
 -- the bucketed table is not merged and the table which is not bucketed is

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_num_reducers.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_num_reducers.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_num_reducers.q
index 37ae6cc..84fe391 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_num_reducers.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_num_reducers.q
@@ -1,6 +1,6 @@
 set hive.enforce.bucketing = true;
 set hive.exec.mode.local.auto=false;
-set mapred.reduce.tasks = 10;
+set mapreduce.job.reduces = 10;
 
 -- This test sets number of mapred tasks to 10 for a database with 50 buckets, 
 -- and uses a post-hook to confirm that 10 tasks were created

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
index d2e12e8..ae72f98 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
@@ -1,5 +1,5 @@
 set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set mapred.min.split.size = 64;
+set mapreduce.input.fileinputformat.split.minsize = 64;
 
 CREATE TABLE T1(name STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine1.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine1.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine1.q
index 86abf09..5ecfc21 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine1.q
@@ -1,11 +1,11 @@
 set hive.exec.compress.output = true;
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-set mapred.min.split.size=256;
-set mapred.min.split.size.per.node=256;
-set mapred.min.split.size.per.rack=256;
-set mapred.max.split.size=256;
+set mapreduce.input.fileinputformat.split.minsize=256;
+set mapreduce.input.fileinputformat.split.minsize.per.node=256;
+set mapreduce.input.fileinputformat.split.minsize.per.rack=256;
+set mapreduce.input.fileinputformat.split.maxsize=256;
 
-set mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec;
+set 
mapreduce.output.fileoutputformat.compress.codec=org.apache.hadoop.io.compress.GzipCodec;
 
 create table combine1_1(key string, value string) stored as textfile;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2.q
index cfd9856..acd0dd5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2.q
@@ -1,10 +1,10 @@
 USE default;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-set mapred.min.split.size=256;
-set mapred.min.split.size.per.node=256;
-set mapred.min.split.size.per.rack=256;
-set mapred.max.split.size=256;
+set mapreduce.input.fileinputformat.split.minsize=256;
+set mapreduce.input.fileinputformat.split.minsize.per.node=256;
+set mapreduce.input.fileinputformat.split.minsize.per.rack=256;
+set mapreduce.input.fileinputformat.split.maxsize=256;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set mapred.cache.shared.enabled=false;
@@ -18,7 +18,7 @@ set hive.merge.smallfiles.avgsize=0;
 create table combine2(key string) partitioned by (value string);
 
 -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
--- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0
+-- This test sets mapreduce.input.fileinputformat.split.maxsize=256 and 
hive.merge.smallfiles.avgsize=0
 -- in an attempt to force the generation of multiple splits and multiple 
output files.
 -- However, Hadoop 0.20 is incapable of generating splits smaller than the 
block size
 -- when using CombineFileInputFormat, so only one split is generated. This has 
a

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2_hadoop20.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2_hadoop20.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2_hadoop20.q
index 8f9a59d..597d3ae 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2_hadoop20.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2_hadoop20.q
@@ -1,10 +1,10 @@
 USE default;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-set mapred.min.split.size=256;
-set mapred.min.split.size.per.node=256;
-set mapred.min.split.size.per.rack=256;
-set mapred.max.split.size=256;
+set mapreduce.input.fileinputformat.split.minsize=256;
+set mapreduce.input.fileinputformat.split.minsize.per.node=256;
+set mapreduce.input.fileinputformat.split.minsize.per.rack=256;
+set mapreduce.input.fileinputformat.split.maxsize=256;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set mapred.cache.shared.enabled=false;
@@ -17,7 +17,7 @@ set hive.merge.smallfiles.avgsize=0;
 create table combine2(key string) partitioned by (value string);
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
--- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0
+-- This test sets mapreduce.input.fileinputformat.split.maxsize=256 and 
hive.merge.smallfiles.avgsize=0
 -- in an attempt to force the generation of multiple splits and multiple 
output files.
 -- However, Hadoop 0.20 is incapable of generating splits smaller than the 
block size
 -- when using CombineFileInputFormat, so only one split is generated. This has 
a

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2_win.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2_win.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2_win.q
index f6090bb..4f7174a 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2_win.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine2_win.q
@@ -1,8 +1,8 @@
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-set mapred.min.split.size=256;
-set mapred.min.split.size.per.node=256;
-set mapred.min.split.size.per.rack=256;
-set mapred.max.split.size=256;
+set mapreduce.input.fileinputformat.split.minsize=256;
+set mapreduce.input.fileinputformat.split.minsize.per.node=256;
+set mapreduce.input.fileinputformat.split.minsize.per.rack=256;
+set mapreduce.input.fileinputformat.split.maxsize=256;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set mapred.cache.shared.enabled=false;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine3.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine3.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine3.q
index c9afc91..35dd442 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/combine3.q
@@ -1,9 +1,9 @@
 set hive.exec.compress.output = true;
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-set mapred.min.split.size=256;
-set mapred.min.split.size.per.node=256;
-set mapred.min.split.size.per.rack=256;
-set mapred.max.split.size=256;
+set mapreduce.input.fileinputformat.split.minsize=256;
+set mapreduce.input.fileinputformat.split.minsize.per.node=256;
+set mapreduce.input.fileinputformat.split.minsize.per.rack=256;
+set mapreduce.input.fileinputformat.split.maxsize=256;
 
 
 drop table combine_3_srcpart_seq_rc;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_1.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_1.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_1.q
index f348e59..5e51d11 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_1.q
@@ -1,4 +1,4 @@
-set fs.default.name=invalidscheme:///;
+set fs.defaultFS=invalidscheme:///;
 
 CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE;
 DESCRIBE table1;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_hadoop20.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_hadoop20.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_hadoop20.q
index f39689d..979c907 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_hadoop20.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_hadoop20.q
@@ -49,7 +49,7 @@ describe formatted nzhang_CTAS4;
 
 explain extended create table nzhang_ctas5 row format delimited fields 
terminated by ',' lines terminated by '\012' stored as textfile as select key, 
value from src sort by key, value limit 10;
 
-set mapred.job.tracker=localhost:58;
+set mapreduce.jobtracker.address=localhost:58;
 set hive.exec.mode.local.auto=true;
 
 create table nzhang_ctas5 row format delimited fields terminated by ',' lines 
terminated by '\012' stored as textfile as select key, value from src sort by 
key, value limit 10;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1.q
index 1275eab..0d75857 100755
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1.q
@@ -3,12 +3,12 @@ set hive.groupby.skewindata=true;
 
 CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE;
 
-set fs.default.name=invalidscheme:///;
+set fs.defaultFS=invalidscheme:///;
 
 EXPLAIN
 FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, 
sum(substr(src.value,5)) GROUP BY src.key;
 
-set fs.default.name=file:///;
+set fs.defaultFS=file:///;
 
 FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, 
sum(substr(src.value,5)) GROUP BY src.key;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_limit.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_limit.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_limit.q
index 5513333..bbb2859 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_limit.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_limit.q
@@ -1,4 +1,4 @@
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map.q
index dde37df..7883d94 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map_skew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map_skew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map_skew.q
index f346cb7..a5ac376 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map_skew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map_skew.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=true;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_noskew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_noskew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_noskew.q
index c587b5f..6341eef 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_noskew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_noskew.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=false;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_limit.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_limit.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_limit.q
index 3049924..df46934 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_limit.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_limit.q
@@ -1,4 +1,4 @@
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 EXPLAIN
 SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY 
src.key LIMIT 5;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map.q
index 794ec75..7b6e175 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q
index 55d1a34..3aeae0d 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS 
TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_skew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_skew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_skew.q
index 39a2a17..998156d 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_skew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_skew.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=true;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew.q
index 6d7cb61..fab4f5d 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=false;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q
index b2450c9..9ef556c 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=false;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS 
TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map.q
index 7ecc71d..36ba5d8 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 
DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map_multi_distinct.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map_multi_distinct.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map_multi_distinct.q
index 50243be..6f0a963 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map_multi_distinct.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map_multi_distinct.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 
DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS 
TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map_skew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map_skew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map_skew.q
index 07d10c2..64a49e2 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map_skew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_map_skew.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=true;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 
DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_noskew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_noskew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_noskew.q
index d33f12c..4fd98ef 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_noskew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_noskew.q
@@ -1,7 +1,7 @@
 set hive.map.aggr=false;
 
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 
DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_noskew_multi_distinct.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_noskew_multi_distinct.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_noskew_multi_distinct.q
index 86d8986..85ee8ac 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_noskew_multi_distinct.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby3_noskew_multi_distinct.q
@@ -1,7 +1,7 @@
 set hive.map.aggr=false;
 
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 
DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS 
TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_map.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_map.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_map.q
index 8ecce23..d717218 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_map.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_map.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key INT) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_map_skew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_map_skew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_map_skew.q
index eb2001c..d1ecba1 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_map_skew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_map_skew.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=true;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key INT) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_noskew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_noskew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_noskew.q
index a1ebf90..63530c2 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_noskew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_noskew.q
@@ -1,7 +1,7 @@
 set hive.map.aggr=false;
 
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_map.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_map.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_map.q
index 4fd6445..4418bbf 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_map.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_map.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key INT) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_map_skew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_map_skew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_map_skew.q
index eccd45d..ef20dac 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_map_skew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_map_skew.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=true;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key INT) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_noskew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_noskew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_noskew.q
index e96568b..17b322b 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_noskew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_noskew.q
@@ -1,7 +1,7 @@
 set hive.map.aggr=false;
 
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map.q
index ced122f..bef0eee 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map_skew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map_skew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map_skew.q
index 0d3727b..ee93b21 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map_skew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map_skew.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=true;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_noskew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_noskew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_noskew.q
index 466c132..72fff08 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_noskew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_noskew.q
@@ -1,7 +1,7 @@
 set hive.map.aggr=false;
 
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map.q
index 2b8c5db..75149b1 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map.q
@@ -1,7 +1,7 @@
 set hive.map.aggr=true;
 set hive.multigroupby.singlereducer=false;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
 CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q
index 5895ed4..7c7829a 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
 CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_skew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_skew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_skew.q
index ee6d7bf..905986d 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_skew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_skew.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=true;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
 CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew.q
index 8c2308e..1f63453 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew.q
@@ -1,7 +1,7 @@
 set hive.map.aggr=false;
 set hive.multigroupby.singlereducer=false;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
 CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q
index e673cc6..2ce57e9 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=false;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
 CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map.q
index 0252e99..9def7d6 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
 CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map_skew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map_skew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map_skew.q
index b5e1f63..788bc68 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map_skew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map_skew.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=true;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
 CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_noskew.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_noskew.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_noskew.q
index da85504..17885c5 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_noskew.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_noskew.q
@@ -1,7 +1,7 @@
 set hive.map.aggr=false;
 
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
 CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr.q
index 4a19936..9cb98aa 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q
index cb3ee82..841df75 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q
@@ -1,6 +1,6 @@
 set hive.map.aggr=true;
 set hive.groupby.skewindata=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS 
TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_1.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_1.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_1.q
index 7401a9c..cdf4bb1 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_1.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_1.q
@@ -248,7 +248,7 @@ SELECT * FROM outputTbl4 ORDER BY key1, key2, key3;
 
 set hive.map.aggr=true;
 set hive.multigroupby.singlereducer=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE DEST1(key INT, cnt INT);
 CREATE TABLE DEST2(key INT, val STRING, cnt INT);

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
index db0faa0..1c23fad 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
@@ -249,7 +249,7 @@ SELECT * FROM outputTbl4 ORDER BY key1, key2, key3;
 
 set hive.map.aggr=true;
 set hive.multigroupby.singlereducer=false;
-set mapred.reduce.tasks=31;
+set mapreduce.job.reduces=31;
 
 CREATE TABLE DEST1(key INT, cnt INT);
 CREATE TABLE DEST2(key INT, val STRING, cnt INT);

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/hook_context_cs.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/hook_context_cs.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/hook_context_cs.q
index 94ba148..996c9d9 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/hook_context_cs.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/hook_context_cs.q
@@ -5,7 +5,7 @@ ALTER TABLE vcsc ADD partition (ds='dummy') location 
'${system:test.tmp.dir}/Ver
 set 
hive.exec.pre.hooks=org.apache.hadoop.hive.ql.hooks.VerifyContentSummaryCacheHook;
 SELECT a.c, b.c FROM vcsc a JOIN vcsc b ON a.ds = 'dummy' AND b.ds = 'dummy' 
AND a.c = b.c;
 
-set mapred.job.tracker=local;
+set mapreduce.jobtracker.address=local;
 set hive.exec.pre.hooks = ;
 set 
hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyContentSummaryCacheHook;
 SELECT a.c, b.c FROM vcsc a JOIN vcsc b ON a.ds = 'dummy' AND b.ds = 'dummy' 
AND a.c = b.c;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
index 728b8cc..5d3c6c4 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
@@ -63,7 +63,7 @@ set hive.merge.mapredfiles=true;
 set hive.merge.smallfiles.avgsize=200;
 set hive.exec.compress.output=false;
 set hive.exec.dynamic.partition=true;
-set mapred.reduce.tasks=2;
+set mapreduce.job.reduces=2;
 
 -- Tests dynamic partitions where bucketing/sorting can be inferred, but some 
partitions are
 -- merged and some are moved.  Currently neither should be bucketed or sorted, 
in the future,

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q
index 41c1a13..aa49b0d 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q
@@ -1,7 +1,7 @@
 set hive.exec.infer.bucket.sort=true;
 set hive.exec.infer.bucket.sort.num.buckets.power.two=true;
 set hive.merge.mapredfiles=true;
-set mapred.reduce.tasks=2;
+set mapreduce.job.reduces=2;
 
 -- This tests inferring how data is bucketed/sorted from the operators in the 
reducer
 -- and populating that information in partitions' metadata.  In particular, 
those cases

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q
index 2255bdb..3a454f7 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q
@@ -1,7 +1,7 @@
 set hive.exec.infer.bucket.sort=true;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
-set mapred.reduce.tasks=2;
+set mapreduce.job.reduces=2;
 
 CREATE TABLE test_table (key INT, value STRING) PARTITIONED BY (ds STRING, hr 
STRING);
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input12_hadoop20.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input12_hadoop20.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input12_hadoop20.q
index 318cd37..31e99e8 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input12_hadoop20.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input12_hadoop20.q
@@ -1,4 +1,4 @@
-set mapred.job.tracker=localhost:58;
+set mapreduce.jobtracker.address=localhost:58;
 set hive.exec.mode.local.auto=true;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input39_hadoop20.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input39_hadoop20.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input39_hadoop20.q
index 29e9fae..362c164 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input39_hadoop20.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input39_hadoop20.q
@@ -15,7 +15,7 @@ select key, value from src;
 
 set hive.test.mode=true;
 set hive.mapred.mode=strict;
-set mapred.job.tracker=localhost:58;
+set mapreduce.jobtracker.address=localhost:58;
 set hive.exec.mode.local.auto=true;
 
 explain
@@ -24,7 +24,7 @@ select count(1) from t1 join t2 on t1.key=t2.key where 
t1.ds='1' and t2.ds='1';
 select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1';
 
 set hive.test.mode=false;
-set mapred.job.tracker;
+set mapreduce.jobtracker.address;
 
 
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input_testsequencefile.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input_testsequencefile.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input_testsequencefile.q
index d992688..2b16c5c 100755
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input_testsequencefile.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input_testsequencefile.q
@@ -1,5 +1,5 @@
-set mapred.output.compress=true;
-set mapred.output.compression.type=BLOCK;
+set mapreduce.output.fileoutputformat.compress=true;
+set mapreduce.output.fileoutputformat.compress.type=BLOCK;
 
 CREATE TABLE dest4_sequencefile(key INT, value STRING) STORED AS SEQUENCEFILE;
 
@@ -10,5 +10,5 @@ INSERT OVERWRITE TABLE dest4_sequencefile SELECT src.key, 
src.value;
 FROM src
 INSERT OVERWRITE TABLE dest4_sequencefile SELECT src.key, src.value;
 
-set mapred.output.compress=false;
+set mapreduce.output.fileoutputformat.compress=false;
 SELECT dest4_sequencefile.* FROM dest4_sequencefile;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join14_hadoop20.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join14_hadoop20.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join14_hadoop20.q
index a12ef1a..b3d75b6 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join14_hadoop20.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join14_hadoop20.q
@@ -2,7 +2,7 @@
 
 CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE;
 
-set mapred.job.tracker=localhost:58;
+set mapreduce.jobtracker.address=localhost:58;
 set hive.exec.mode.local.auto=true;
 
 EXPLAIN

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin_mr.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin_mr.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin_mr.q
index c9ebe0e..d98247b 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin_mr.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin_mr.q
@@ -9,7 +9,7 @@ SELECT * FROM T1;
 SELECT * FROM T2;
 
 set hive.auto.convert.join=false;
-set mapred.reduce.tasks=2;
+set mapreduce.job.reduces=2;
 
 set hive.join.emit.interval=100;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge2.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge2.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge2.q
index 8b77bd2..9189e7c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge2.q
@@ -1,9 +1,9 @@
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
-set mapred.min.split.size=256;
-set mapred.min.split.size.per.node=256;
-set mapred.min.split.size.per.rack=256;
-set mapred.max.split.size=256;
+set mapreduce.input.fileinputformat.split.minsize=256;
+set mapreduce.input.fileinputformat.split.minsize.per.node=256;
+set mapreduce.input.fileinputformat.split.minsize.per.rack=256;
+set mapreduce.input.fileinputformat.split.maxsize=256;
 
 create table test1(key int, val int);
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_createas1.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_createas1.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_createas1.q
index 8726925..dcb2a85 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_createas1.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_createas1.q
@@ -1,5 +1,5 @@
-set mapred.max.split.size=100;
-set mapred.min.split.size=1;
+set mapreduce.input.fileinputformat.split.maxsize=100;
+set mapreduce.input.fileinputformat.split.minsize=1;
 
 DROP TABLE orc_createas1a;
 DROP TABLE orc_createas1b;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_char.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_char.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_char.q
index 1f5f54a..93f8f51 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_char.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_char.q
@@ -1,6 +1,6 @@
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET mapred.min.split.size=1000;
-SET mapred.max.split.size=5000;
+SET mapreduce.input.fileinputformat.split.minsize=1000;
+SET mapreduce.input.fileinputformat.split.maxsize=5000;
 
 create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), da date) 
stored as orc tblproperties("orc.stripe.size"="16777216"); 
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_date.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_date.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_date.q
index c34be86..3a74de8 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_date.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_date.q
@@ -1,6 +1,6 @@
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET mapred.min.split.size=1000;
-SET mapred.max.split.size=5000;
+SET mapreduce.input.fileinputformat.split.minsize=1000;
+SET mapreduce.input.fileinputformat.split.maxsize=5000;
 
 create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), da date) 
stored as orc tblproperties("orc.stripe.size"="16777216"); 
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_decimal.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_decimal.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_decimal.q
index a93590e..82f68a9 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_decimal.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_decimal.q
@@ -1,6 +1,6 @@
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET mapred.min.split.size=1000;
-SET mapred.max.split.size=5000;
+SET mapreduce.input.fileinputformat.split.minsize=1000;
+SET mapreduce.input.fileinputformat.split.maxsize=5000;
 
 create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), da date) 
stored as orc tblproperties("orc.stripe.size"="16777216"); 
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_varchar.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_varchar.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_varchar.q
index 0fecc66..99f58cd 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_varchar.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_varchar.q
@@ -1,6 +1,6 @@
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET mapred.min.split.size=1000;
-SET mapred.max.split.size=5000;
+SET mapreduce.input.fileinputformat.split.minsize=1000;
+SET mapreduce.input.fileinputformat.split.maxsize=5000;
 
 create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), da date) 
stored as orc tblproperties("orc.stripe.size"="16777216"); 
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_split_elimination.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_split_elimination.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_split_elimination.q
index 54eb23e..9aa868f 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_split_elimination.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_split_elimination.q
@@ -3,8 +3,8 @@ create table orc_split_elim (userid bigint, string1 string, 
subtype double, deci
 load data local inpath '../../data/files/orc_split_elim.orc' into table 
orc_split_elim;
 
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET mapred.min.split.size=1000;
-SET mapred.max.split.size=5000;
+SET mapreduce.input.fileinputformat.split.minsize=1000;
+SET mapreduce.input.fileinputformat.split.maxsize=5000;
 SET hive.optimize.index.filter=false;
 
 -- The above table will have 5 splits with the followings stats

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel.q
index 03edeaa..3ac6030 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel.q
@@ -1,4 +1,4 @@
-set mapred.job.name='test_parallel';
+set mapreduce.job.name='test_parallel';
 set hive.exec.parallel=true;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel_orderby.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel_orderby.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel_orderby.q
index 73c3940..777771f 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel_orderby.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel_orderby.q
@@ -2,7 +2,7 @@ create table src5 (key string, value string);
 load data local inpath '../../data/files/kv5.txt' into table src5;
 load data local inpath '../../data/files/kv5.txt' into table src5;
 
-set mapred.reduce.tasks = 4;
+set mapreduce.job.reduces = 4;
 set hive.optimize.sampling.orderby=true;
 set hive.optimize.sampling.orderby.percent=0.66f;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_createas1.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_createas1.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_createas1.q
index f362037..14e13c5 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_createas1.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_createas1.q
@@ -1,6 +1,6 @@
 set hive.merge.rcfile.block.level=true;
-set mapred.max.split.size=100;
-set mapred.min.split.size=1;
+set mapreduce.input.fileinputformat.split.maxsize=100;
+set mapreduce.input.fileinputformat.split.minsize=1;
 
 DROP TABLE rcfile_createas1a;
 DROP TABLE rcfile_createas1b;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_lazydecompress.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_lazydecompress.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_lazydecompress.q
index 7f55d10..43a15a0 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_lazydecompress.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_lazydecompress.q
@@ -10,7 +10,7 @@ SELECT key, value FROM rcfileTableLazyDecompress where key > 
238 and key < 400 O
 
 SELECT key, count(1) FROM rcfileTableLazyDecompress where key > 238 group by 
key ORDER BY key ASC;
 
-set mapred.output.compress=true;
+set mapreduce.output.fileoutputformat.compress=true;
 set hive.exec.compress.output=true;
 
 FROM src
@@ -22,6 +22,6 @@ SELECT key, value FROM rcfileTableLazyDecompress where key > 
238 and key < 400 O
 
 SELECT key, count(1) FROM rcfileTableLazyDecompress where key > 238 group by 
key ORDER BY key ASC;
 
-set mapred.output.compress=false;
+set mapreduce.output.fileoutputformat.compress=false;
 set hive.exec.compress.output=false;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge1.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge1.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge1.q
index 1f6f1bd..2507157 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge1.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge1.q
@@ -1,6 +1,6 @@
 set hive.merge.rcfile.block.level=false;
 set hive.exec.dynamic.partition=true;
-set mapred.max.split.size=100;
+set mapreduce.input.fileinputformat.split.maxsize=100;
-set mapref.min.split.size=1;
+set mapreduce.input.fileinputformat.split.minsize=1;
 
 DROP TABLE rcfile_merge1;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge2.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge2.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge2.q
index 215d5eb..15ffb90 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge2.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge2.q
@@ -1,7 +1,7 @@
 set hive.merge.rcfile.block.level=true;
 set hive.exec.dynamic.partition=true;
-set mapred.max.split.size=100;
-set mapred.min.split.size=1;
+set mapreduce.input.fileinputformat.split.maxsize=100;
+set mapreduce.input.fileinputformat.split.minsize=1;
 
 DROP TABLE rcfile_merge2a;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge3.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge3.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge3.q
index 39fbd25..787ab4a 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge3.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge3.q
@@ -1,6 +1,6 @@
 set hive.merge.rcfile.block.level=true;
-set mapred.max.split.size=100;
-set mapred.min.split.size=1;
+set mapreduce.input.fileinputformat.split.maxsize=100;
+set mapreduce.input.fileinputformat.split.minsize=1;
 
 DROP TABLE rcfile_merge3a;
 DROP TABLE rcfile_merge3b;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge4.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge4.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge4.q
index fe6df28..77ac381 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge4.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_merge4.q
@@ -1,6 +1,6 @@
 set hive.merge.rcfile.block.level=true;
-set mapred.max.split.size=100;
-set mapred.min.split.size=1;
+set mapreduce.input.fileinputformat.split.maxsize=100;
+set mapreduce.input.fileinputformat.split.minsize=1;
 
 DROP TABLE rcfile_merge3a;
 DROP TABLE rcfile_merge3b;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q
index 12f2bcd..bf12ba5 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q
@@ -1,8 +1,8 @@
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-set mapred.max.split.size=300;
-set mapred.min.split.size=300;
-set mapred.min.split.size.per.node=300;
-set mapred.min.split.size.per.rack=300;
+set mapreduce.input.fileinputformat.split.maxsize=300;
+set mapreduce.input.fileinputformat.split.minsize=300;
+set mapreduce.input.fileinputformat.split.minsize.per.node=300;
+set mapreduce.input.fileinputformat.split.minsize.per.rack=300;
 set hive.exec.mode.local.auto=true;
 set hive.merge.smallfiles.avgsize=1;
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
index 484e1fa..5d1bd18 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
@@ -1,15 +1,15 @@
 USE default;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-set mapred.max.split.size=300;
-set mapred.min.split.size=300;
-set mapred.min.split.size.per.node=300;
-set mapred.min.split.size.per.rack=300;
+set mapreduce.input.fileinputformat.split.maxsize=300;
+set mapreduce.input.fileinputformat.split.minsize=300;
+set mapreduce.input.fileinputformat.split.minsize.per.node=300;
+set mapreduce.input.fileinputformat.split.minsize.per.rack=300;
 set hive.exec.mode.local.auto=true;
 set hive.merge.smallfiles.avgsize=1;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
--- This test sets mapred.max.split.size=300 and hive.merge.smallfiles.avgsize=1
+-- This test sets mapreduce.input.fileinputformat.split.maxsize=300 and 
hive.merge.smallfiles.avgsize=1
 -- in an attempt to force the generation of multiple splits and multiple 
output files.
 -- However, Hadoop 0.20 is incapable of generating splits smaller than the 
block size
 -- when using CombineFileInputFormat, so only one split is generated. This has 
a
@@ -25,7 +25,7 @@ create table sih_src as select key, value from sih_i_part 
order by key, value;
 create table sih_src2 as select key, value from sih_src order by key, value;
 
 set hive.exec.post.hooks = 
org.apache.hadoop.hive.ql.hooks.VerifyIsLocalModeHook ;
-set mapred.job.tracker=localhost:58;
+set mapreduce.jobtracker.address=localhost:58;
 set hive.exec.mode.local.auto.input.files.max=1;
 
 -- Sample split, running locally limited by num tasks

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/split_sample.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/split_sample.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/split_sample.q
index 952eaf7..eb774f1 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/split_sample.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/split_sample.q
@@ -1,14 +1,14 @@
 USE default;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-set mapred.max.split.size=300;
-set mapred.min.split.size=300;
-set mapred.min.split.size.per.node=300;
-set mapred.min.split.size.per.rack=300;
+set mapreduce.input.fileinputformat.split.maxsize=300;
+set mapreduce.input.fileinputformat.split.minsize=300;
+set mapreduce.input.fileinputformat.split.minsize.per.node=300;
+set mapreduce.input.fileinputformat.split.minsize.per.rack=300;
 set hive.merge.smallfiles.avgsize=1;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20)
--- This test sets mapred.max.split.size=300 and hive.merge.smallfiles.avgsize=1
+-- This test sets mapreduce.input.fileinputformat.split.maxsize=300 and 
hive.merge.smallfiles.avgsize=1
 -- in an attempt to force the generation of multiple splits and multiple 
output files.
 -- However, Hadoop 0.20 is incapable of generating splits smaller than the 
block size
 -- when using CombineFileInputFormat, so only one split is generated. This has 
a
@@ -72,10 +72,10 @@ select t1.key as k1, t2.key as k from ss_src1 
tablesample(80 percent) t1 full ou
 
 -- shrink last split
 explain select count(1) from ss_src2 tablesample(1 percent);
-set mapred.max.split.size=300000;
-set mapred.min.split.size=300000;
-set mapred.min.split.size.per.node=300000;
-set mapred.min.split.size.per.rack=300000;
+set mapreduce.input.fileinputformat.split.maxsize=300000;
+set mapreduce.input.fileinputformat.split.minsize=300000;
+set mapreduce.input.fileinputformat.split.minsize.per.node=300000;
+set mapreduce.input.fileinputformat.split.minsize.per.rack=300000;
 select count(1) from ss_src2 tablesample(1 percent);
 select count(1) from ss_src2 tablesample(50 percent);
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_partscan_1.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_partscan_1.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_partscan_1.q
index cdf92e4..caf359c 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_partscan_1.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_partscan_1.q
@@ -2,13 +2,13 @@ set datanucleus.cache.collections=false;
 set hive.stats.autogather=false;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
-set mapred.min.split.size=256;
-set mapred.min.split.size.per.node=256;
-set mapred.min.split.size.per.rack=256;
-set mapred.max.split.size=256;
+set mapreduce.input.fileinputformat.split.minsize=256;
+set mapreduce.input.fileinputformat.split.minsize.per.node=256;
+set mapreduce.input.fileinputformat.split.minsize.per.rack=256;
+set mapreduce.input.fileinputformat.split.maxsize=256;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
--- This test uses mapred.max.split.size/mapred.max.split.size for controlling
+-- This test uses 
mapreduce.input.fileinputformat.split.maxsize/mapreduce.input.fileinputformat.split.minsize 
for controlling
 -- number of input splits, which is not effective in hive 0.20.
 -- stats_partscan_1_23.q is the same test with this but has different result.
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_partscan_1_23.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_partscan_1_23.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_partscan_1_23.q
index 1e5f360..0769489 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_partscan_1_23.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_partscan_1_23.q
@@ -2,13 +2,13 @@ set datanucleus.cache.collections=false;
 set hive.stats.autogather=false;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
-set mapred.min.split.size=256;
-set mapred.min.split.size.per.node=256;
-set mapred.min.split.size.per.rack=256;
-set mapred.max.split.size=256;
+set mapreduce.input.fileinputformat.split.minsize=256;
+set mapreduce.input.fileinputformat.split.minsize.per.node=256;
+set mapreduce.input.fileinputformat.split.minsize.per.rack=256;
+set mapreduce.input.fileinputformat.split.maxsize=256;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- This test uses mapred.max.split.size/mapred.max.split.size for controlling
+-- This test uses 
mapreduce.input.fileinputformat.split.maxsize/mapreduce.input.fileinputformat.split.minsize 
for controlling
 -- number of input splits.
 -- stats_partscan_1.q is the same test with this but has different result.
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_context_ngrams.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_context_ngrams.q
 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_context_ngrams.q
index f065385..5b5d669 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_context_ngrams.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_context_ngrams.q
@@ -1,6 +1,6 @@
 CREATE TABLE kafka (contents STRING);
 LOAD DATA LOCAL INPATH '../../data/files/text-en.txt' INTO TABLE kafka;
-set mapred.reduce.tasks=1;
+set mapreduce.job.reduces=1;
 set hive.exec.reducers.max=1;
 
 SELECT context_ngrams(sentences(lower(contents)), array(null), 100, 
1000).estfrequency FROM kafka;

http://git-wip-us.apache.org/repos/asf/spark/blob/9b8eca65/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_ngrams.q
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_ngrams.q 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_ngrams.q
index 6a2fde5..39e6e30 100644
--- 
a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_ngrams.q
+++ 
b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_ngrams.q
@@ -1,6 +1,6 @@
 CREATE TABLE kafka (contents STRING);
 LOAD DATA LOCAL INPATH '../../data/files/text-en.txt' INTO TABLE kafka;
-set mapred.reduce.tasks=1;
+set mapreduce.job.reduces=1;
 set hive.exec.reducers.max=1;
 
 SELECT ngrams(sentences(lower(contents)), 1, 100, 1000).estfrequency FROM 
kafka;


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to