Github user HyukjinKwon commented on a diff in the pull request:

    https://github.com/apache/spark/pull/18659#discussion_r139585473
  
    --- Diff: python/pyspark/serializers.py ---
    @@ -199,6 +211,46 @@ def __repr__(self):
             return "ArrowSerializer"
     
     
    +class ArrowPandasSerializer(ArrowSerializer):
    +    """
    +    Serializes Pandas.Series as Arrow data.
    +    """
    +
    +    def __init__(self):
    +        super(ArrowPandasSerializer, self).__init__()
    +
    +    def dumps(self, series):
    +        """
    +        Make an ArrowRecordBatch from a Pandas Series and serialize. Input is a single series or
    +        a list of series accompanied by an optional pyarrow type to coerce the data to.
    +        """
    +        import pyarrow as pa
    +        # Make input conform to [(series1, type1), (series2, type2), ...]
    +        if not isinstance(series, (list, tuple)) or \
    +                (len(series) == 2 and isinstance(series[1], pa.DataType)):
    +            series = [series]
    +        series = [(s, None) if not isinstance(s, (list, tuple)) else s for s in series]
    --- End diff --
    
    I'd use a generator comprehension here.
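    
    For illustration, a minimal sketch of that suggestion (an assumption about the intent, not code from the PR): replace the list comprehension with a generator expression so the normalized pairs are produced lazily, which is fine as long as the result is only iterated once afterwards.
    
        # Sketch only: build the (series, type) pairs lazily instead of
        # materializing an intermediate list. Assumes `series` is consumed
        # exactly once by the code that follows.
        series = ((s, None) if not isinstance(s, (list, tuple)) else s
                  for s in series)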


---
