Github user MLnick commented on a diff in the pull request:

    https://github.com/apache/spark/pull/11919#discussion_r61767887

--- Diff: mllib/src/test/scala/org/apache/spark/ml/recommendation/ALSSuite.scala ---
@@ -520,6 +488,81 @@ class ALSSuite
   }
 }
+
+class ALSCleanerSuite extends SparkFunSuite {
+  test("Clean shuffles") {
+    val conf = new SparkConf()
+    val localDir = Utils.createTempDir()
+    val checkpointDir = Utils.createTempDir()
+    def getAllFiles: Set[File] =
+      FileUtils.listFiles(localDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE).asScala.toSet
+    try {
+      conf.set("spark.local.dir", localDir.getAbsolutePath)
+      conf.set("spark.shuffle.manager", "sort")
+      val sc = new SparkContext("local[2]", "test", conf)
+      try {
+        sc.setCheckpointDir(checkpointDir.getAbsolutePath)
+        // Test checkpoint and clean parents
+        val input = sc.parallelize(1 to 1000)
+        val keyed = input.map(x => (x % 20, 1))
+        val shuffled = keyed.reduceByKey(_ + _)
+        val keysOnly = shuffled.keys
+        val deps = keysOnly.dependencies
+        keysOnly.count()
+        ALS.cleanShuffleDependencies(sc, deps, true)
+        val resultingFiles = getAllFiles
+        assert(resultingFiles === Set())
+        // Ensure running count again works fine even if we kill the shuffle files.
+        keysOnly.count()
+      } finally {
+        sc.stop()
+      }
+    } finally {
+      Utils.deleteRecursively(localDir)
+      Utils.deleteRecursively(checkpointDir)
+    }
+  }
+
+  test("ALS shuffle cleanup") {
+    val conf = new SparkConf()
+    val localDir = Utils.createTempDir()
+    val checkpointDir = Utils.createTempDir()
+    def getAllFiles: Set[File] =
+      FileUtils.listFiles(localDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE).asScala.toSet
+    try {
+      conf.set("spark.local.dir", localDir.getAbsolutePath)
+      conf.set("spark.shuffle.manager", "sort")
+      val sc = new SparkContext("local[2]", "test", conf)
+      try {
+        sc.setCheckpointDir(checkpointDir.getAbsolutePath)
+        // Generate test data
+        val (training, _) = ALSSuite.genImplicitTestData(sc, 100, 10, 1, 0.2, 0)
+        // Implicitly test the cleaning of parents during ALS training
+        val sqlContext = new SQLContext(sc)
+        import sqlContext.implicits._
+        val als = new ALS()
+          .setRank(1)
+          .setRegParam(1e-5)
+          .setSeed(0)
+          .setCheckpointInterval(1)
+          .setMaxIter(50)
--- End diff --

What was wrong initially with the default maxIter of 10?

On Mon, 2 May 2016 at 18:48, Xiangrui Meng <notificati...@github.com> wrote:

> In mllib/src/test/scala/org/apache/spark/ml/recommendation/ALSSuite.scala
> <https://github.com/apache/spark/pull/11919#discussion_r61765218>:
>
> > +      conf.set("spark.local.dir", localDir.getAbsolutePath)
> > +      conf.set("spark.shuffle.manager", "sort")
> > +      val sc = new SparkContext("local[2]", "test", conf)
> > +      try {
> > +        sc.setCheckpointDir(checkpointDir.getAbsolutePath)
> > +        // Generate test data
> > +        val (training, _) = ALSSuite.genImplicitTestData(sc, 100, 10, 1, 0.2, 0)
> > +        // Implicitly test the cleaning of parents during ALS training
> > +        val sqlContext = new SQLContext(sc)
> > +        import sqlContext.implicits._
> > +        val als = new ALS()
> > +          .setRank(1)
> > +          .setRegParam(1e-5)
> > +          .setSeed(0)
> > +          .setCheckpointInterval(1)
> > +          .setMaxIter(50)
>
> This test takes 20 seconds to run on my local. Please optimize the time
> required.
>
> You are receiving this because you were mentioned.
> Reply to this email directly or view it on GitHub
> <https://github.com/apache/spark/pull/11919/files/3a18915ee9c803deb494eee77ba960646e2d470c#r61765218>
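For what it's worth, the 20 s runtime likely comes from running 50 iterations with a checkpoint (and the associated shuffle cleanup) on every one of them. If the intent is only to verify that parent shuffles are cleaned during training, a much lighter configuration should still exercise that path several times. A rough sketch, with illustrative values that have not been benchmarked against this test:

```scala
import org.apache.spark.ml.recommendation.ALS

// Hypothetical lighter setup: with checkpointInterval = 2 and maxIter = 7,
// checkpointing still fires three times (around iterations 2, 4 and 6), so
// the cleanup-of-parents path is hit repeatedly while running far fewer
// iterations than 50.
val als = new ALS()
  .setRank(1)
  .setRegParam(1e-5)
  .setSeed(0)
  .setCheckpointInterval(2)
  .setMaxIter(7)
```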