EnricoMi commented on code in PR #48038:
URL: https://github.com/apache/spark/pull/48038#discussion_r1774820310


##########
python/pyspark/worker.py:
##########
@@ -333,17 +336,38 @@ def wrap_cogrouped_map_arrow_udf(f, return_type, argspec, runner_conf):
             (col.name, to_arrow_type(col.dataType)) for col in return_type.fields
         ]
 
-    def wrapped(left_key_table, left_value_table, right_key_table, right_value_table):
-        if len(argspec.args) == 2:
-            result = f(left_value_table, right_value_table)
-        elif len(argspec.args) == 3:
-            key_table = left_key_table if left_key_table.num_rows > 0 else right_key_table
-            key = tuple(c[0] for c in key_table.columns)
-            result = f(key, left_value_table, right_value_table)
-
-        verify_arrow_result(result, _assign_cols_by_name, expected_cols_and_types)
+    def wrapped(left_key_batch, left_value_batches, right_key_batch, right_value_batches):
+        if is_generator:
+            if len(argspec.args) == 2:
+                result = f(left_value_batches, right_value_batches)
+            elif len(argspec.args) == 3:
+                key_batch = left_key_batch if left_key_batch.num_rows > 0 else right_key_batch
+                key = tuple(c[0] for c in key_batch.columns)
+                result = f(key, left_value_batches, right_value_batches)
+
+            def verify_element(batch):
+                verify_arrow_batch(batch, _assign_cols_by_name, expected_cols_and_types)
+                return batch
+
+            yield from map(verify_element, result)
+            # Make sure both iterators are fully consumed
+            for _ in left_value_batches:
+                pass
+            for _ in right_value_batches:
+                pass
 
-        return result.to_batches()
+        else:

Review Comment:
   We could get rid of this code duplication (the whole `False` branch) by turning `f` into an iterator function when `is_generator` is `False` (though the `len(argspec.args)` check still duplicates some code).
   
   Add this right after the `import pyarrow as pa`:
   
       if not is_generator:
           def as_iterator(left: Iterator[pa.RecordBatch], right: Iterator[pa.RecordBatch]) -> Iterator[pa.RecordBatch]:
               left_value_table = pa.Table.from_batches(left)
               right_value_table = pa.Table.from_batches(right)
               result = f(left_value_table, right_value_table)
               verify_arrow_table(result, _assign_cols_by_name, expected_cols_and_types)
               yield from result.to_batches()

           def as_iterator_with_key(key, left: Iterator[pa.RecordBatch], right: Iterator[pa.RecordBatch]) -> Iterator[pa.RecordBatch]:
               left_value_table = pa.Table.from_batches(left)
               right_value_table = pa.Table.from_batches(right)
               result = f(key, left_value_table, right_value_table)
               verify_arrow_table(result, _assign_cols_by_name, expected_cols_and_types)
               yield from result.to_batches()

           if len(argspec.args) == 2:
               f = as_iterator
           elif len(argspec.args) == 3:
               f = as_iterator_with_key
   
   Then, we don't need to check `is_generator` in `wrapped`.
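
   For illustration, a rough sketch of how `wrapped` could then look once the branch is gone (same names as in the diff, keeping the drain loops from the PR):

       def wrapped(left_key_batch, left_value_batches, right_key_batch, right_value_batches):
           if len(argspec.args) == 2:
               result = f(left_value_batches, right_value_batches)
           elif len(argspec.args) == 3:
               key_batch = left_key_batch if left_key_batch.num_rows > 0 else right_key_batch
               key = tuple(c[0] for c in key_batch.columns)
               result = f(key, left_value_batches, right_value_batches)

           def verify_element(batch):
               # per-batch check, same as in the generator path of the PR
               verify_arrow_batch(batch, _assign_cols_by_name, expected_cols_and_types)
               return batch

           yield from map(verify_element, result)
           # Make sure both iterators are fully consumed
           for _ in left_value_batches:
               pass
           for _ in right_value_batches:
               pass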



##########
python/pyspark/worker.py:
##########
@@ -333,17 +336,38 @@ def wrap_cogrouped_map_arrow_udf(f, return_type, argspec, runner_conf):
             (col.name, to_arrow_type(col.dataType)) for col in return_type.fields
         ]
 
-    def wrapped(left_key_table, left_value_table, right_key_table, right_value_table):
-        if len(argspec.args) == 2:
-            result = f(left_value_table, right_value_table)
-        elif len(argspec.args) == 3:
-            key_table = left_key_table if left_key_table.num_rows > 0 else right_key_table
-            key = tuple(c[0] for c in key_table.columns)
-            result = f(key, left_value_table, right_value_table)
-
-        verify_arrow_result(result, _assign_cols_by_name, expected_cols_and_types)
+    def wrapped(left_key_batch, left_value_batches, right_key_batch, right_value_batches):
+        if is_generator:
+            if len(argspec.args) == 2:
+                result = f(left_value_batches, right_value_batches)
+            elif len(argspec.args) == 3:
+                key_batch = left_key_batch if left_key_batch.num_rows > 0 else right_key_batch
+                key = tuple(c[0] for c in key_batch.columns)
+                result = f(key, left_value_batches, right_value_batches)
+
+            def verify_element(batch):
+                verify_arrow_batch(batch, _assign_cols_by_name, expected_cols_and_types)
+                return batch
+
+            yield from map(verify_element, result)
+            # Make sure both iterators are fully consumed
Review Comment:
   can we somehow close the iterators to stop transferring unused data?
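
   For illustration only, a minimal sketch of one way that could look, assuming the value streams are plain Python generators (whether the JVM side then actually stops sending the remaining batches depends on the serializer/protocol, so this is just the Python-side shape):

       # hypothetical: close the generators instead of draining them
       for it in (left_value_batches, right_value_batches):
           if hasattr(it, "close"):
               it.close()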


