HADOOP-12733. Remove references to obsolete io.seqfile configuration variables. Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01d31fe9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01d31fe9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01d31fe9

Branch: refs/heads/HADOOP-13345
Commit: 01d31fe9389ccdc153d7f4bf6574bf8e509867c1
Parents: 87bb1c4
Author: Akira Ajisaka <aajis...@apache.org>
Authored: Wed Jan 4 14:10:36 2017 +0900
Committer: Akira Ajisaka <aajis...@apache.org>
Committed: Wed Jan 4 14:10:36 2017 +0900

----------------------------------------------------------------------
 .../src/main/resources/core-default.xml             | 16 ----------------
 .../hadoop/conf/TestCommonConfigurationFields.java  |  2 --
 .../test/resources/job_1329348432655_0001_conf.xml  |  2 --
 .../src/main/data/2jobs2min-rumen-jh.json           |  6 ------
 4 files changed, 26 deletions(-)
----------------------------------------------------------------------
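For context: the two keys being removed, io.seqfile.lazydecompress and io.seqfile.sorter.recordlimit, are obsolete; nothing in the current codebase reads them, so deleting the leftover references changes no behavior. A minimal sketch (not part of this commit; assumes hadoop-common on the classpath) of how one could verify that the keys have disappeared from the loaded defaults:

    import org.apache.hadoop.conf.Configuration;

    public class CheckObsoleteSeqfileKeys {
      public static void main(String[] args) {
        // new Configuration() loads core-default.xml (plus core-site.xml).
        Configuration conf = new Configuration();
        for (String key : new String[] {
            "io.seqfile.lazydecompress",
            "io.seqfile.sorter.recordlimit"}) {
          // With this patch applied, get() returns null unless a user
          // has set the key explicitly in core-site.xml.
          System.out.println(key + " = " + conf.get(key));
        }
      }
    }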


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01d31fe9/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index b4a34db..ee2cc2e 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1262,22 +1262,6 @@
   </description>
 </property>
 
-<property>
-  <name>io.seqfile.lazydecompress</name>
-  <value>true</value>
-  <description>Should values of block-compressed SequenceFiles be decompressed
-          only when necessary.
-  </description>
-</property>
-
-<property>
-  <name>io.seqfile.sorter.recordlimit</name>
-  <value>1000000</value>
-  <description>The limit on number of records to be kept in memory in a spill
-          in SequenceFiles.Sorter
-  </description>
-</property>
-
  <property>
   <name>io.mapfile.bloom.size</name>
   <value>1048576</value>

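The removed descriptions record what these knobs once controlled: lazy decompression of values in block-compressed SequenceFiles, and a per-spill record cap in SequenceFile.Sorter. The current Sorter sizes its spills by an in-memory buffer rather than a record count, which is why the record-limit key had become a no-op. A hedged sketch of sorting a SequenceFile with today's API (the paths and the IntWritable/Text key/value classes are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class SortSeqFile {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Key/value classes must match those the file was written with.
        SequenceFile.Sorter sorter =
            new SequenceFile.Sorter(fs, IntWritable.class, Text.class, conf);
        sorter.sort(new Path[] { new Path("input.seq") },
                    new Path("sorted.seq"), false);
      }
    }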
http://git-wip-us.apache.org/repos/asf/hadoop/blob/01d31fe9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 571dfae..a3a4026 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -124,8 +124,6 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
     configurationPropsToSkipCompare.add("dr.who");
 
     // XML deprecated properties.
-    xmlPropsToSkipCompare.add("io.seqfile.lazydecompress");
-    xmlPropsToSkipCompare.add("io.seqfile.sorter.recordlimit");
     // - org.apache.hadoop.hdfs.client.HdfsClientConfigKeys
     xmlPropsToSkipCompare
         .add("io.bytes.per.checksum");

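TestCommonConfigurationFields cross-checks the properties declared in core-default.xml against the key constants in the common configuration classes; a property that legitimately appears on only one side must be listed in a skip set such as xmlPropsToSkipCompare. With the two properties gone from the XML, their skip entries would themselves be stale, hence the two deletions above. Roughly how that comparison works, as an illustrative sketch (not the actual TestConfigurationFieldsBase code):

    import java.util.HashSet;
    import java.util.Set;

    public class XmlVsFieldsSketch {
      public static void main(String[] args) {
        Set<String> xmlProps = new HashSet<>();      // names parsed from core-default.xml
        Set<String> configFields = new HashSet<>();  // constants reflected from config classes
        Set<String> xmlPropsToSkipCompare = new HashSet<>();

        xmlProps.add("io.mapfile.bloom.size");
        configFields.add("io.mapfile.bloom.size");

        // XML-only properties fail the test unless explicitly skipped.
        Set<String> xmlOnly = new HashSet<>(xmlProps);
        xmlOnly.removeAll(configFields);
        xmlOnly.removeAll(xmlPropsToSkipCompare);
        System.out.println("Unaccounted XML-only properties: " + xmlOnly);
      }
    }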
http://git-wip-us.apache.org/repos/asf/hadoop/blob/01d31fe9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
index e4619d6..4c73e8b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
@@ -16,7 +16,6 @@
 <property><!--Loaded from job.xml--><name>mapreduce.reduce.shuffle.memory.limit.percent</name><value>0.25</value></property>
 <property><!--Loaded from job.xml--><name>hadoop.http.authentication.kerberos.keytab</name><value>${user.home}/hadoop.keytab</value></property>
 <property><!--Loaded from job.xml--><name>yarn.nodemanager.keytab</name><value>/etc/krb5.keytab</value></property>
-<property><!--Loaded from job.xml--><name>io.seqfile.sorter.recordlimit</name><value>1000000</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.task.io.sort.factor</name><value>10</value></property>
 <property><!--Loaded from job.xml--><name>yarn.nodemanager.disk-health-checker.interval-ms</name><value>120000</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.job.working.dir</name><value>hdfs://localhost:8021/user/user</value></property>
@@ -102,7 +101,6 @@
 <property><!--Loaded from job.xml--><name>dfs.client.block.write.retries</name><value>3</value></property>
 <property><!--Loaded from job.xml--><name>hadoop.proxyuser.user.groups</name><value>users</value></property>
 <property><!--Loaded from job.xml--><name>dfs.namenode.name.dir.restore</name><value>false</value></property>
-<property><!--Loaded from job.xml--><name>io.seqfile.lazydecompress</name><value>true</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.reduce.merge.inmem.threshold</name><value>1000</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.input.fileinputformat.split.minsize</name><value>0</value></property>
 <property><!--Loaded from job.xml--><name>dfs.replication</name><value>3</value></property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01d31fe9/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
index 095cfd5..c252539 100644
--- a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
+++ b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
@@ -4545,7 +4545,6 @@
     "hadoop.ssl.keystores.factory.class" : 
"org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory",
     "hadoop.http.authentication.kerberos.keytab" : 
"${user.home}/hadoop.keytab",
     "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
-    "io.seqfile.sorter.recordlimit" : "1000000",
     "mapreduce.task.io.sort.factor" : "10",
     "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
     "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
@@ -4662,7 +4661,6 @@
     "rpc.engine.org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB" : 
"org.apache.hadoop.ipc.ProtobufRpcEngine",
     "dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
     "ha.zookeeper.parent-znode" : "/hadoop-ha",
-    "io.seqfile.lazydecompress" : "true",
     "mapreduce.reduce.merge.inmem.threshold" : "1000",
     "mapreduce.input.fileinputformat.split.minsize" : "0",
     "dfs.replication" : "3",
@@ -9627,7 +9625,6 @@
     "hadoop.ssl.keystores.factory.class" : 
"org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory",
     "hadoop.http.authentication.kerberos.keytab" : 
"${user.home}/hadoop.keytab",
     "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
-    "io.seqfile.sorter.recordlimit" : "1000000",
     "mapreduce.task.io.sort.factor" : "10",
     "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
     "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
@@ -9744,7 +9741,6 @@
     "rpc.engine.org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB" : 
"org.apache.hadoop.ipc.ProtobufRpcEngine",
     "dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
     "ha.zookeeper.parent-znode" : "/hadoop-ha",
-    "io.seqfile.lazydecompress" : "true",
     "mapreduce.reduce.merge.inmem.threshold" : "1000",
     "mapreduce.input.fileinputformat.split.minsize" : "0",
     "dfs.replication" : "3",
@@ -10209,7 +10205,6 @@
 "hadoop.ssl.keystores.factory.class" : 
"org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory",
 "hadoop.http.authentication.kerberos.keytab" : "${user.home}/hadoop.keytab",
 "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
-"io.seqfile.sorter.recordlimit" : "1000000",
 "mapreduce.task.io.sort.factor" : "10",
 "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
 "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
@@ -10327,7 +10322,6 @@
 "rpc.engine.org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB" : 
"org.apache.hadoop.ipc.ProtobufRpcEngine",
 "dfs.datanode.hdfs-blocks-metadata.enabled" : "true",
 "ha.zookeeper.parent-znode" : "/hadoop-ha",
-"io.seqfile.lazydecompress" : "true",
 "mapreduce.reduce.merge.inmem.threshold" : "1000",
 "mapreduce.input.fileinputformat.split.minsize" : "0",
 "dfs.replication" : "3",

