Github user rxin commented on a diff in the pull request: https://github.com/apache/spark/pull/2455#discussion_r18122185 --- Diff: core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala --- @@ -53,56 +81,237 @@ trait RandomSampler[T, U] extends Pseudorandom with Cloneable with Serializable * @tparam T item type */ @DeveloperApi -class BernoulliSampler[T](lb: Double, ub: Double, complement: Boolean = false) +class BernoulliPartitionSampler[T](lb: Double, ub: Double, complement: Boolean = false) extends RandomSampler[T, T] { - private[random] var rng: Random = new XORShiftRandom + // epsilon slop to avoid failure from floating point jitter + @transient val eps: Double = RandomSampler.epsArgs + require(lb <= (ub + eps), "Lower bound (lb) must be <= upper bound (ub)") + require(lb >= (0d - eps), "Lower bound (lb) must be >= 0.0") + require(ub <= (1d + eps), "Upper bound (ub) must be <= 1.0") - def this(ratio: Double) = this(0.0d, ratio) + private val rng: Random = new XORShiftRandom override def setSeed(seed: Long) = rng.setSeed(seed) override def sample(items: Iterator[T]): Iterator[T] = { - items.filter { item => - val x = rng.nextDouble() - (x >= lb && x < ub) ^ complement + ub-lb match { --- End diff -- i think this is easier to read if you do ```scala if (ub <= lb) { if (complement) items else Iterator.empty } else { if (complement) { items.filter { item => val x = rng.nextDouble() (x < lb) || (x >= ub) } } else { items.filter { _ => val x = rng.nextDouble() (x >= lb) && (x < ub) } } } ```
--- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastructure@apache.org or file a JIRA ticket with INFRA. --- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For additional commands, e-mail: reviews-help@spark.apache.org