Github user viirya commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16677#discussion_r97710396

    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala ---
    @@ -90,25 +95,100 @@ trait BaseLimitExec extends UnaryExecNode with CodegenSupport {
     }
     
     /**
    - * Take the first `limit` elements of each child partition, but do not collect or shuffle them.
    + * Take the `limit` elements of the child output.
      */
    -case class LocalLimitExec(limit: Int, child: SparkPlan) extends BaseLimitExec {
    +case class GlobalLimitExec(limit: Int, child: SparkPlan) extends UnaryExecNode {
     
    -  override def outputOrdering: Seq[SortOrder] = child.outputOrdering
    +  override def output: Seq[Attribute] = child.output
     
       override def outputPartitioning: Partitioning = child.outputPartitioning
    -}
     
    -/**
    - * Take the first `limit` elements of the child's single output partition.
    - */
    -case class GlobalLimitExec(limit: Int, child: SparkPlan) extends BaseLimitExec {
    +  override def outputOrdering: Seq[SortOrder] = child.outputOrdering
     
    -  override def requiredChildDistribution: List[Distribution] = AllTuples :: Nil
    +  private val serializer: Serializer = new UnsafeRowSerializer(child.output.size)
     
    -  override def outputPartitioning: Partitioning = child.outputPartitioning
    +  protected override def doExecute(): RDD[InternalRow] = {
    +    val childRDD = child.execute()
    +    val partitioner = FakePartitioning(child.outputPartitioning,
    +      childRDD.getNumPartitions)
    +    val shuffleDependency = ShuffleExchange.prepareShuffleDependency(
    +      childRDD, child.output, partitioner, serializer)
    +    val numberOfOutput: Seq[Int] = if (shuffleDependency.rdd.getNumPartitions != 0) {
    +      // submitMapStage does not accept an RDD with 0 partitions,
    +      // so we do not submit this dependency.
    +      val submittedStageFuture = sparkContext.submitMapStage(shuffleDependency)
    +      submittedStageFuture.get().numberOfOutput.toSeq
    +    } else {
    +      Nil
    +    }
     
    -  override def outputOrdering: Seq[SortOrder] = child.outputOrdering
    +    // Whether to preserve the child plan's original data parallelism. Enabled by default.
    +    val respectChildParallelism = sqlContext.conf.enableParallelGlobalLimit
    +
    +    val sumOfOutput = numberOfOutput.sum
    +    if (sumOfOutput <= limit) {
    +      childRDD
    +    } else if (!respectChildParallelism) {
    +      // This is mainly for tests.
    +      // Take rows from each partition in order until the limit is reached.
    +      var countForRows = 0
    +      val takeAmounts = new mutable.HashMap[Int, Int]()
    +      numberOfOutput.zipWithIndex.foreach { case (num, index) =>
    +        if (countForRows + num < limit) {
    +          countForRows += num
    +          takeAmounts += ((index, num))
    +        } else {
    +          val toTake = limit - countForRows
    +          countForRows += toTake
    +          takeAmounts += ((index, toTake))
    +        }
    +      }
    +      val shuffled = new ShuffledRowRDD(shuffleDependency)
    +      shuffled.mapPartitionsWithIndexInternal { case (index, iter) =>
    +        takeAmounts.get(index).map { size =>
    +          iter.take(size)
    +        }.getOrElse(iter)
    --- End diff --
    
    Actually we won't reach this `getOrElse` fallback (the foreach above inserts an entry into `takeAmounts` for every partition index), but the change is ok.
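To make the unreachability concrete, here is a minimal standalone sketch of the take-amount bookkeeping from the diff. The object name, the per-partition row counts, and the limit are hypothetical values chosen for illustration, not taken from the PR:

    import scala.collection.mutable

    object TakeAmountsSketch {
      def main(args: Array[String]): Unit = {
        val numberOfOutput = Seq(3, 5, 2, 4) // hypothetical rows per map output partition
        val limit = 7                        // hypothetical global limit

        // Same loop shape as in the diff: walk the partitions in order, taking
        // whole partitions until the remaining budget is smaller than the next one.
        var countForRows = 0
        val takeAmounts = new mutable.HashMap[Int, Int]()
        numberOfOutput.zipWithIndex.foreach { case (num, index) =>
          if (countForRows + num < limit) {
            countForRows += num
            takeAmounts += ((index, num))
          } else {
            // Once the limit is exhausted, later partitions still get an entry
            // (with 0 rows to take); no index is ever skipped.
            val toTake = limit - countForRows
            countForRows += toTake
            takeAmounts += ((index, toTake))
          }
        }

        // Every partition index ends up as a key, so takeAmounts.get(index) is
        // always Some(...) and the .getOrElse(iter) branch in the diff is dead code.
        assert(numberOfOutput.indices.forall(takeAmounts.contains))
        println(takeAmounts.toSeq.sortBy(_._1)) // pairs: (0,3), (1,4), (2,0), (3,0)
      }
    }

With these sample values the first partition is taken whole (3 rows), the second is truncated to 4 rows to hit the limit of 7, and the remaining partitions contribute 0 rows but still appear in the map.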