GitHub user tdas commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16970#discussion_r101871632
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/statefulOperators.scala ---
    @@ -321,3 +327,66 @@ case class MapGroupsWithStateExec(
           }
       }
     }
    +
    +
    +/** Physical operator for executing streaming deduplication. */
    +case class DeduplicationExec(
    +    keyExpressions: Seq[Attribute],
    +    child: SparkPlan,
    +    stateId: Option[OperatorStateId] = None,
    +    eventTimeWatermark: Option[Long] = None)
    +  extends UnaryExecNode with StateStoreWriter with WatermarkSupport {
    +
    +  /** Distribute by grouping attributes */
    +  override def requiredChildDistribution: Seq[Distribution] =
    +    ClusteredDistribution(keyExpressions) :: Nil
    +
    +  override protected def doExecute(): RDD[InternalRow] = {
    +    metrics // force lazy init at driver
    +
    +    child.execute().mapPartitionsWithStateStore(
    +      getStateId.checkpointLocation,
    +      operatorId = getStateId.operatorId,
    +      storeVersion = getStateId.batchId,
    +      keyExpressions.toStructType,
    +      child.output.toStructType,
    +      sqlContext.sessionState,
    +      Some(sqlContext.streams.stateStoreCoordinator)) { (store, iter) =>
    +      val getKey = GenerateUnsafeProjection.generate(keyExpressions, child.output)
    +      val numOutputRows = longMetric("numOutputRows")
    +      val numTotalStateRows = longMetric("numTotalStateRows")
    +      val numUpdatedStateRows = longMetric("numUpdatedStateRows")
    +
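    +      // Filter out rows already below the event-time watermark (too late to matter).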
    +      val baseIterator = watermarkPredicate match {
    +        case Some(predicate) => iter.filter((row: InternalRow) => !predicate.eval(row))
    +        case None => iter
    +      }
    +
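    +      // A key's presence in the state store marks it as already seen.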
    +      while (baseIterator.hasNext) {
    +        val row = baseIterator.next().asInstanceOf[UnsafeRow]
    +        val key = getKey(row)
    +        val value = store.get(key)
    +        if (value.isEmpty) {
    +          store.put(key.copy(), row.copy())
    --- End diff ---
    
    Naah, the HDFSBackedStateStore can't handle nulls. How about using `UnsafeRow.createFromByteArray(0, 0)`? We can reuse this immutable object.
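    
    For illustration, here is a minimal sketch of that suggestion, reusing the names from the diff above (`EMPTY_ROW` is a hypothetical name for the shared sentinel, not something in the patch). Since deduplication only ever checks whether a key is present, the stored value is never read, so one shared zero-byte row is enough:
    
    ```scala
    // Hypothetical sketch, not the actual patch: allocate one zero-byte
    // UnsafeRow up front and share it across all keys as the stored value.
    val EMPTY_ROW = UnsafeRow.createFromByteArray(0, 0)
    
    while (baseIterator.hasNext) {
      val row = baseIterator.next().asInstanceOf[UnsafeRow]
      val key = getKey(row)
      if (store.get(key).isEmpty) {
        // First occurrence of this key: record it with the shared empty
        // value instead of copying the whole input row into the store.
        store.put(key.copy(), EMPTY_ROW)
        numUpdatedStateRows += 1
      }
    }
    ```
    
    This avoids both the null values the state store cannot handle and a per-key copy of the input row.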

