cloud-fan commented on code in PR #52153:
URL: https://github.com/apache/spark/pull/52153#discussion_r2309662848
##########
sql/core/src/main/scala/org/apache/spark/sql/classic/Dataset.scala:
##########
@@ -1544,6 +1544,35 @@ class Dataset[T] private[sql](
}
}
+ /**
+ * Repartitions the Dataset using the specified partition ID expression.
+ *
+ * The number of partitions will be configured by `spark.sql.shuffle.partitions`.
+ *
+ * @param partitionIdExpr the expression to be used as the partition ID. Must be an integral type.
+ *
+ * @group typedrel
+ * @since 4.1.0
+ */
+ def repartitionById(partitionIdExpr: Column): Dataset[T] = {
+   repartitionById(sparkSession.sessionState.conf.numShufflePartitions, partitionIdExpr)
+ }
+
+ /**
+ * Repartitions the Dataset into the given number of partitions using the specified
+ * partition ID expression.
+ *
+ * @param numPartitions the number of partitions to use.
+ * @param partitionIdExpr the expression to be used as the partition ID. Must be an integral type.
+ *
+ * @group typedrel
+ * @since 4.1.0
+ */
+ def repartitionById(numPartitions: Int, partitionIdExpr: Column): Dataset[T] = {
+   val directShufflePartitionIdCol = Column(DirectShufflePartitionID(partitionIdExpr.expr))
+   repartitionByExpression(Some(numPartitions), Seq(directShufflePartitionIdCol))
Review Comment:
We can create `RepartitionByExpression` directly with a special flag to
indicate pass through, then we don't need `DirectShufflePartitionID`.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]