Updated Branches:
  refs/heads/master 6eef78d76 -> 5cae05f59

Remove calls to the deprecated mapred OutputCommitter.cleanupJob because, since 
Hadoop 1.0.4,
the mapred OutputCommitter.commitJob performs the cleanup itself.

In fact the implementation of mapred OutputCommitter.commitJob looks like this:

  public void commitJob(JobContext jobContext) throws IOException {
    cleanupJob(jobContext);
  }

(The jobContext input argument is of type org.apache.hadoop.mapred.JobContext)


Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/4517326e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/4517326e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/4517326e

Branch: refs/heads/master
Commit: 4517326ec68d15f7d9bedade5f9bba33d760e6b4
Parents: 61674bc
Author: Henry Saputra <hsapu...@apache.org>
Authored: Tue Jan 7 22:55:56 2014 -0800
Committer: Henry Saputra <hsapu...@apache.org>
Committed: Tue Jan 7 22:55:56 2014 -0800

----------------------------------------------------------------------
 core/src/main/scala/org/apache/spark/SparkHadoopWriter.scala    | 4 ----
 core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala | 2 --
 2 files changed, 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/4517326e/core/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/SparkHadoopWriter.scala 
b/core/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
index 103a1c2..618d950 100644
--- a/core/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
+++ b/core/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
@@ -127,10 +127,6 @@ class SparkHadoopWriter(@transient jobConf: JobConf)
     cmtr.commitJob(getJobContext())
   }
 
-  def cleanup() {
-    getOutputCommitter().cleanupJob(getJobContext())
-  }
-
   // ********* Private Functions *********
 
   private def getOutputFormat(): OutputFormat[AnyRef,AnyRef] = {

http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/4517326e/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala 
b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
index 04a8d05..629fb39 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -638,7 +638,6 @@ class PairRDDFunctions[K: ClassTag, V: ClassTag](self: 
RDD[(K, V)])
     jobCommitter.setupJob(jobTaskContext)
     val count = self.context.runJob(self, writeShard _).sum
     jobCommitter.commitJob(jobTaskContext)
-    jobCommitter.cleanupJob(jobTaskContext)
   }
 
   /**
@@ -728,7 +727,6 @@ class PairRDDFunctions[K: ClassTag, V: ClassTag](self: 
RDD[(K, V)])
 
     self.context.runJob(self, writeToFile _)
     writer.commitJob()
-    writer.cleanup()
   }
 
   /**

Reply via email to