GitHub user pwendell commented on a diff in the pull request:
https://github.com/apache/incubator-spark/pull/585#discussion_r9890019
--- Diff: project/MimaBuild.scala ---
@@ -0,0 +1,115 @@
+import com.typesafe.tools.mima.plugin.MimaKeys.{binaryIssueFilters, previousArtifact}
+import com.typesafe.tools.mima.plugin.MimaPlugin.mimaDefaultSettings
+
+object MimaBuild {
+
+  val ignoredABIProblems = {
+    import com.typesafe.tools.mima.core._
+    import com.typesafe.tools.mima.core.ProblemFilters._
+    /**
+     * A: Detections likely to become semi-private at some point.
+     */
+    Seq(exclude[MissingClassProblem]("org.apache.spark.util.XORShiftRandom"),
+      exclude[MissingClassProblem]("org.apache.spark.util.XORShiftRandom$"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.Utils.cloneWritables"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.nextItem_="),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.objectsRead"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.cleanup"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.objectsRead_="),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.fileStream"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.deserializeStream_="),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.compressedStream"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.nextItem"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.deserializeStream"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.bufferedStream"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.readNextItem"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.eof"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.eof_="),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.this"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#ExternalIterator.StreamBuffer"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#ExternalIterator.sortedMap"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#ExternalIterator.getMorePairs"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#ExternalIterator.mergeIfKeyExists"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#ExternalIterator.mergeHeap"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#ExternalIterator.inputStreams"),
+      exclude[MissingMethodProblem]("org.apache.spark.deploy.DeployMessages#LaunchExecutor.apply"),
+      exclude[MissingMethodProblem]("org.apache.spark.deploy.DeployMessages#LaunchExecutor.apply"),
+      exclude[IncompatibleResultTypeProblem]("org.apache.spark.deploy.ApplicationDescription.sparkHome"),
+      exclude[IncompatibleMethTypeProblem]("org.apache.spark.deploy.ApplicationDescription.this"),
+      exclude[MissingMethodProblem]("org.apache.spark.deploy.DeployMessages#LaunchExecutor.sparkHome"),
+      exclude[MissingMethodProblem]("org.apache.spark.deploy.DeployMessages#LaunchExecutor.copy"),
+      exclude[MissingMethodProblem]("org.apache.spark.deploy.DeployMessages#LaunchExecutor.copy$default$7"),
+      exclude[MissingMethodProblem]("org.apache.spark.deploy.DeployMessages#LaunchExecutor.this"),
+      exclude[MissingMethodProblem]("org.apache.spark.deploy.worker.Worker.sparkHome_="),
+      exclude[IncompatibleResultTypeProblem]("org.apache.spark.deploy.master.Master.registerWorker"),
+      exclude[MissingMethodProblem]("org.apache.spark.deploy.master.Master.launchExecutor"),
+      exclude[MissingMethodProblem]("org.apache.spark.deploy.master.Master.actorToWorker"),
+      exclude[IncompatibleMethTypeProblem]("org.apache.spark.scheduler.TaskSchedulerImpl.handleFailedTask"),
+      exclude[MissingMethodProblem]("org.apache.spark.scheduler.TaskSchedulerImpl.taskSetTaskIds"),
+      exclude[IncompatibleMethTypeProblem]("org.apache.spark.scheduler.TaskSetManager.handleFailedTask"),
+      exclude[MissingTypesProblem]("org.apache.spark.deploy.DeployMessages$LaunchExecutor$"),
+      exclude[MissingMethodProblem]("org.apache.spark.scheduler.TaskSetManager.removeAllRunningTasks"),
+      exclude[MissingMethodProblem]("org.apache.spark.scheduler.TaskSetManager.runningTasks_="),
+      exclude[MissingMethodProblem]("org.apache.spark.scheduler.DAGScheduler.lastFetchFailureTime"),
+      exclude[MissingMethodProblem]("org.apache.spark.scheduler.DAGScheduler.lastFetchFailureTime_="),
+      exclude[MissingMethodProblem]("org.apache.spark.storage.BlockObjectWriter.bytesWritten")) ++
+    /**
+     * B: Detections are mostly false +ve.
+     */
+    Seq(exclude[MissingMethodProblem]("org.apache.spark.SparkContext.newAPIHadoopFile$default$6"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.hadoopRDD"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.sequenceFile"),
+      exclude[IncompatibleMethTypeProblem]("org.apache.spark.SparkContext.sequenceFile"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.sequenceFile"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.sequenceFile$default$4"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.newAPIHadoopFile$default$2"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.newAPIHadoopRDD$default$5"),
+      exclude[IncompatibleMethTypeProblem]("org.apache.spark.SparkContext.hadoopFile"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.hadoopFile"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.hadoopFile"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.sequenceFile$default$3"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.hadoopFile$default$3"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.hadoopFile$default$6"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.sequenceFile$default$5"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.newAPIHadoopRDD"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.hadoopFile$default$2"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.hadoopRDD$default$6"),
+      exclude[MissingMethodProblem]("org.apache.spark.SparkContext.newAPIHadoopFile"),
+      exclude[IncompatibleMethTypeProblem]("org.apache.spark.SparkContext.newAPIHadoopFile"),
+      exclude[MissingMethodProblem]("org.apache.spark.rdd.NewHadoopRDD.this"),
+      exclude[MissingMethodProblem]("org.apache.spark.rdd.HadoopRDD.<init>$default$8"),
+      exclude[MissingClassProblem]("org.apache.spark.rdd.ClassTags$"),
+      exclude[MissingClassProblem]("org.apache.spark.rdd.ClassTags"),
+      exclude[MissingMethodProblem]("org.apache.spark.rdd.HadoopRDD.this"),
+      exclude[MissingMethodProblem]("org.apache.spark.rdd.HadoopRDD.this"),
+      exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.setGenerator"),
+      exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.mapPartitions"),
+      exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.mapPartitions"),
+      exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.mapPartitions"),
+      exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaRDDLike.foreachPartition"),
+      exclude[MissingMethodProblem]("org.apache.spark.api.python.PythonRDD.writeToStream")
+    ) ++
+    /**
+     * Detections I am unsure about. Should be either moved to B (false +ve) or A.
+     */
+    Seq(exclude[MissingClassProblem]("org.apache.spark.mllib.recommendation.MFDataGenerator$"),
+      exclude[MissingClassProblem]("org.apache.spark.mllib.recommendation.MFDataGenerator"),
+      exclude[MissingClassProblem]("org.apache.spark.mllib.optimization.SquaredGradient"),
+      exclude[IncompatibleResultTypeProblem]("org.apache.spark.mllib.regression.LinearRegressionWithSGD.gradient"),
+      exclude[IncompatibleResultTypeProblem]("org.apache.spark.mllib.regression.RidgeRegressionWithSGD.gradient"),
+      exclude[IncompatibleResultTypeProblem]("org.apache.spark.mllib.regression.LassoWithSGD.gradient"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap.org$apache$spark$util$collection$ExternalAppendOnlyMap$$wrapForCompression$1"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap.org$apache$spark$util$collection$ExternalAppendOnlyMap$$sparkConf"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.shouldCompress"),
+      exclude[MissingMethodProblem]("org.apache.spark.util.collection.ExternalAppendOnlyMap#DiskMapIterator.compressionCodec"),
+      exclude[MissingMethodProblem]("org.apache.spark.api.java.JavaPairRDD.cogroupResultToJava"),
--- End diff --
@aarondav this actually isn't private in master... should it be?
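
For context while reviewing: the diff above only shows the filter list itself, but the two keys imported at the top (`previousArtifact` and `binaryIssueFilters`) suggest how it gets applied. A minimal sketch of the usual MiMa wiring follows; the artifact coordinates below are assumptions for illustration, not taken from this PR:

    import sbt._
    import com.typesafe.tools.mima.plugin.MimaKeys.{binaryIssueFilters, previousArtifact}
    import com.typesafe.tools.mima.plugin.MimaPlugin.mimaDefaultSettings

    // Sketch only. mimaDefaultSettings enables the plugin; the two settings
    // below tell it what to diff against and which known issues to suppress.
    def mimaSettings: Seq[Setting[_]] = mimaDefaultSettings ++ Seq(
      // Last released artifact to compare the current build against
      // (assumed coordinates, chosen here purely as an example).
      previousArtifact := Some("org.apache.spark" % "spark-core_2.10" % "0.9.0-incubating"),
      // Suppress the reviewed incompatibilities collected above.
      binaryIssueFilters ++= MimaBuild.ignoredABIProblems
    )

With settings like these applied to a project, the plugin's report task (`mima-report-binary-issues` on sbt 0.12.x) flags only problems not covered by the filters.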