Github user falaki commented on a diff in the pull request:

    https://github.com/apache/spark/pull/1025#discussion_r14622781
  
    --- Diff: core/src/main/scala/org/apache/spark/util/random/StratifiedSampler.scala ---
    @@ -0,0 +1,335 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark.util.random
    +
    +import scala.collection.Map
    +import scala.collection.mutable.{ArrayBuffer, HashMap, Map => MMap}
    +import scala.reflect.ClassTag
    +
    +import org.apache.commons.math3.random.RandomDataGenerator
    +import org.apache.spark.{Logging, SparkContext, TaskContext}
    +import org.apache.spark.rdd.RDD
    +import org.apache.spark.util.Utils
    +
    +/**
     + * Auxiliary functions and data structures for the sampleByKey method in PairRDDFunctions.
    + *
     + * For more theoretical background on the sampling techniques used here, please refer to
    + * http://jmlr.org/proceedings/papers/v28/meng13a.html
    + */
    +private[spark] object StratifiedSampler extends Logging {
    +
    +  /**
     +   * A version of {@link #aggregate()} that passes the TaskContext to the function that does
     +   * aggregation for each partition. This function avoids creating an extra depth in the RDD
     +   * lineage (as opposed to using mapPartitionsWithIndex), which results in slightly
     +   * improved run time.
    +   */
    +  def aggregateWithContext[U: ClassTag, T: ClassTag](zeroValue: U)
    +      (rdd: RDD[T],
    +       seqOp: ((TaskContext, U), T) => U,
    +       combOp: (U, U) => U): U = {
    +    val sc: SparkContext = rdd.sparkContext
     +    // Clone the zero value since we will also be serializing it as part of tasks
     +    var jobResult = Utils.clone(zeroValue, sc.env.closureSerializer.newInstance())
     +    // pad seqOp and combOp with taskContext to conform to aggregate's signature
     +    // in TraversableOnce
     +    val paddedSeqOp = (arg1: (TaskContext, U), item: T) => (arg1._1, seqOp(arg1, item))
    +    val paddedCombOp = (arg1: (TaskContext, U), arg2: (TaskContext, U)) =>
     +      (arg1._1, combOp(arg1._2, arg2._2))
    +    val cleanSeqOp = sc.clean(paddedSeqOp)
    +    val cleanCombOp = sc.clean(paddedCombOp)
    +    val aggregatePartition = (tc: TaskContext, it: Iterator[T]) =>
    +      (it.aggregate(tc, zeroValue)(cleanSeqOp, cleanCombOp))._2
     +    val mergeResult = (index: Int, taskResult: U) => jobResult = combOp(jobResult, taskResult)
    +    sc.runJob(rdd, aggregatePartition, mergeResult)
    +    jobResult
    +  }
    +
    +  /**
     +   * Returns the function used by aggregate to collect sampling statistics for each partition.
    +   */
    +  def getSeqOp[K, V](withReplacement: Boolean,
    +      fractions: Map[K, Double],
     +      counts: Option[Map[K, Long]]): ((TaskContext, Result[K]), (K, V)) => Result[K] = {
    +    val delta = 5e-5
    +    (output: (TaskContext, Result[K]), item: (K, V)) => {
    +      val result = output._2
    +      val tc = output._1
    +      val rng = result.getRand(tc.partitionId)
    +      val fraction = fractions(item._1)
    +      val stratum = result.getEntry(item._1)
    +      if (withReplacement) {
     +        // compute acceptBound and waitListBound only if they haven't been computed already
     +        // since they don't change from iteration to iteration.
    +        // TODO change this to the streaming version
    +        if (stratum.areBoundsEmpty) {
    +          val n = counts.get(item._1)
    +          val sampleSize = math.ceil(n * fraction).toLong
    +          val lmbd1 = PoissonBounds.getLowerBound(sampleSize)
    +          val minCount = PoissonBounds.getMinCount(lmbd1)
    +          val lmbd2 = if (lmbd1 == 0) {
    +            PoissonBounds.getUpperBound(sampleSize)
    +          } else {
    +            PoissonBounds.getUpperBound(sampleSize - minCount)
    +          }
    +          stratum.acceptBound = lmbd1 / n
    +          stratum.waitListBound = lmbd2 / n
    +        }
     +        val x1 = if (stratum.acceptBound == 0.0) 0L else rng.nextPoisson(stratum.acceptBound)
    +        if (x1 > 0) {
    +          stratum.incrNumAccepted(x1)
    +        }
    +        val x2 = rng.nextPoisson(stratum.waitListBound).toInt
    +        if (x2 > 0) {
     +          stratum.addToWaitList(ArrayBuffer.fill(x2)(rng.nextUniform(0.0, 1.0)))
    +        }
    +      } else {
     +        // We use the streaming version of the algorithm for sampling without replacement
     +        // to avoid using an extra pass over the RDD for computing the count.
    +        // Hence, acceptBound and waitListBound change on every iteration.
     +        val g1 = -math.log(delta) / stratum.numItems // gamma1
     +        val g2 = (2.0 / 3.0) * g1 // gamma2
     +        stratum.acceptBound = math.max(0, fraction + g2 - math.sqrt(g2 * g2 + 3 * g2 * fraction))
     +        stratum.waitListBound = math.min(1, fraction + g1 + math.sqrt(g1 * g1 + 2 * g1 * fraction))
    +
    +        val x = rng.nextUniform(0.0, 1.0)
    +        if (x < stratum.acceptBound) {
    +          stratum.incrNumAccepted()
    +        } else if (x < stratum.waitListBound) {
    +          stratum.addToWaitList(x)
    +        }
    +      }
    +      stratum.incrNumItems()
    +      result
    +    }
    +  }
    +
    +  /**
     +   * Returns the function used by aggregate to combine results from different partitions,
     +   * as returned by seqOp.
    +   */
    +  def getCombOp[K](): (Result[K], Result[K]) => Result[K] = {
    +    (r1: Result[K], r2: Result[K]) => {
     +      // take union of both key sets in case one partition doesn't contain all keys
    +      val keyUnion = r1.resultMap.keySet.union(r2.resultMap.keySet)
    +
     +      // Use r2 to keep the combined result since r1 is usually empty
    +      for (key <- keyUnion) {
    +        val entry1 = r1.resultMap.get(key)
    +        if (r2.resultMap.contains(key)) {
    +          r2.resultMap(key).merge(entry1)
    +        } else {
    +          r2.addEntry(key, entry1)
    +        }
    +      }
    +      r2
    +    }
    +  }
    +
    +  /**
     +   * Given the result returned by the aggregate function, determine the threshold for
     +   * accepting items to generate the exact sample size.
     +   *
     +   * To do so, we compute sampleSize = math.ceil(size * samplingRate) for each stratum and
     +   * compare it to the number of items that were accepted instantly and the number of items
     +   * in the waitlist for that stratum. Most of the time,
     +   * numAccepted <= sampleSize <= (numAccepted + numWaitlisted), which means we need to sort
     +   * the elements in the waitlist by their associated values in order to find the value T
     +   * s.t. |{elements in the stratum whose associated values <= T}| = sampleSize. Note that
     +   * all elements in the waitlist have values >= bound for instant accept, so a T value in
     +   * the waitlist range would allow all elements that were instantly accepted on the first
     +   * pass to be included in the sample.
    +   */
    +  def computeThresholdByKey[K](finalResult: Map[K, Stratum],
    +      fractions: Map[K, Double]):
    --- End diff --
    
    Style nit: the return type of the function is on a new line. We can join these two lines.
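
    A side note for readers following the math in getSeqOp: in the without-replacement
    branch, acceptBound and waitListBound are recomputed from gamma1/gamma2 on every
    item. A minimal self-contained sketch of that decision rule (illustrative only,
    not the PR's code; the object and method names here are made up, and the real
    implementation keeps this state in a per-stratum object):

        import scala.util.Random

        object StreamingBoundsSketch {
          val delta = 5e-5

          // Recompute the accept/waitlist bounds after numItems items have been seen,
          // mirroring the gamma1/gamma2 formulas in the diff above.
          def bounds(fraction: Double, numItems: Long): (Double, Double) = {
            val g1 = -math.log(delta) / numItems
            val g2 = (2.0 / 3.0) * g1
            val accept = math.max(0, fraction + g2 - math.sqrt(g2 * g2 + 3 * g2 * fraction))
            val waitList = math.min(1, fraction + g1 + math.sqrt(g1 * g1 + 2 * g1 * fraction))
            (accept, waitList)
          }

          def main(args: Array[String]): Unit = {
            val rng = new Random(42)
            val fraction = 0.1
            var accepted = 0L
            var waitListed = 0L
            for (i <- 1L to 100000L) {
              val (acceptBound, waitListBound) = bounds(fraction, i)
              val x = rng.nextDouble()
              if (x < acceptBound) accepted += 1          // instantly in the sample
              else if (x < waitListBound) waitListed += 1 // resolved by the threshold pass
            }
            println(s"instantly accepted: $accepted, waitlisted: $waitListed")
          }
        }

    As the bounds tighten around the target fraction with growing numItems, fewer
    items land on the waitlist, which is what keeps single-pass exact-size sampling
    cheap.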
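    And on computeThresholdByKey itself, whose Javadoc describes the final threshold
    selection: a simplified sketch under the assumption that a stratum boils down to
    (numItems, numAccepted, waitList); the StratumState case class and field names
    below are mine, not the PR's:

        object ThresholdSketch {
          // Simplified per-stratum state after the first pass: total items seen, items
          // accepted instantly, and the uniform values of items parked on the waitlist.
          final case class StratumState(numItems: Long, numAccepted: Long, waitList: Seq[Double])

          // Find T such that the instantly accepted items plus all waitlisted items with
          // value <= T yield exactly sampleSize = ceil(numItems * fraction) items.
          def threshold(s: StratumState, fraction: Double): Double = {
            val sampleSize = math.ceil(s.numItems * fraction).toLong
            val needed = (sampleSize - s.numAccepted).toInt
            if (needed <= 0) 0.0                      // enough accepted already; skip the waitlist
            else if (needed >= s.waitList.size) 1.0   // take the entire waitlist
            else s.waitList.sorted.apply(needed - 1)  // the needed-th smallest waitlisted value
          }
        }

    For example, threshold(StratumState(1000L, 48L, Seq(0.07, 0.02, 0.09, 0.04)), 0.05)
    gives sampleSize = 50, so two waitlisted items are needed and the threshold is 0.04.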
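    Finally, for anyone skimming the thread: the user-facing entry point for all of
    this machinery is sampleByKey on a pair RDD. A rough usage sketch, assuming the
    method lands with a (withReplacement, fractions, seed) signature and that a
    SparkContext named sc is in scope:

        // Per-stratum sampling rates, keyed by the RDD's keys.
        val fractions = Map("a" -> 0.5, "b" -> 0.4)
        val rdd = sc.parallelize(Seq(("a", 1), ("a", 2), ("b", 3), ("b", 4), ("b", 5)))
        // Sample each stratum without replacement at its own rate.
        val sampled = rdd.sampleByKey(withReplacement = false, fractions, seed = 7L)
        sampled.collect().foreach(println)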

