Yikun commented on code in PR #37923:
URL: https://github.com/apache/spark/pull/37923#discussion_r973821134
##########
python/pyspark/pandas/groupby.py:
##########
@@ -993,6 +993,98 @@ def nth(self, n: int) -> FrameLike:
 
         return self._prepare_return(DataFrame(internal))
 
+    def prod(self, numeric_only: Optional[bool] = True, min_count: int = 0):
+        """
+        Compute prod of groups.
+
+        Parameters
+        ----------
+        numeric_only : bool, default False
+            Include only float, int, boolean columns. If None, will attempt to use
+            everything, then use only numeric data.
+
+        min_count: int, default 0
+            The required number of valid values to perform the operation.
+            If fewer than min_count non-NA values are present the result will be NA.
+
+        .. versionadded:: 3.4.0
+
+        Returns
+        -------
+        pyspark.pandas.Series or pyspark.pandas.DataFrame
+
+        See Also
+        --------
+        pyspark.pandas.Series.groupby
+        pyspark.pandas.DataFrame.groupby
+
+        Examples
+        --------
+        >>> df = ps.DataFrame({'A': [1, 1, 2, 1, 2],
+        ...                    'B': [np.nan, 2, 3, 4, 5],
+        ...                    'C': [1, 2, 1, 1, 2],
+        ...                    'D': [True, False, True, False, True]})
+
+        Groupby one column and return the prod of the remaining columns in
+        each group.
+
+        >>> df.groupby('A').prod().sort_index()
+              B  C   D
+        A
+        1   8.0  2   0
+        2  15.0  2  11
+
+        >>> df.groupby('A').prod(min_count=3).sort_index()
+             B    C    D
+        A
+        1  NaN    2    0

Review Comment:
```suggestion
        1  NaN  2.0  0.0
```
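The reason behind the suggested `2.0`/`0.0`: once `min_count` leaves a group with too few valid values, pandas fills the result with NaN, and the NaN upcasts the int and bool columns to float. A quick sketch against plain pandas (assuming pandas-on-Spark should mirror this behavior):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
                   'B': [np.nan, 2, 3, 4, 5],
                   'C': [1, 2, 1, 1, 2],
                   'D': [True, False, True, False, True]})

# Group A == 2 has only two rows, so with min_count=3 every column of
# that group becomes NaN; the NaN forces C and D to float dtype.
print(df.groupby('A').prod(min_count=3))
#      B    C    D
# A
# 1  NaN  2.0  0.0
# 2  NaN  NaN  NaN
```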
##########
python/pyspark/pandas/groupby.py:
##########
@@ -61,7 +61,7 @@
     NumericType,
     StructField,
     StructType,
-    StringType,
+    StringType, IntegralType,

Review Comment:
```suggestion
    StringType,
    IntegralType,
```

##########
python/pyspark/pandas/groupby.py:
##########
@@ -993,6 +993,98 @@ def nth(self, n: int) -> FrameLike:
+        self._validate_agg_columns(numeric_only=numeric_only, function_name="prod")
+
+        groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(self._groupkeys))]
+        internal, agg_columns, sdf = self._prepare_reduce(
+            groupkey_names=groupkey_names,
+            accepted_spark_types=(NumericType, BooleanType),
+            bool_to_numeric=True,
+        )
+
+        psdf: DataFrame = DataFrame(internal)
+        if len(psdf._internal.column_labels) > 0:
+
+            stat_exprs = []
+            for label in psdf._internal.column_labels:
+                psser = psdf._psser_for(label)
+                column = psser._dtype_op.nan_to_null(psser).spark.column
+                data_type = psser.spark.data_type
+
+                if isinstance(data_type, IntegralType):
+                    stat_exprs.append(F.product(column).cast(data_type).alias(f"{label[0]}"))
+                else:
+                    stat_exprs.append(F.product(column).alias(f"{label[0]}"))
+
+                stat_exprs.append(F.count(column).alias(f"{label[0]}_count"))

Review Comment:
   Looks like we also don't need this helper column if min_count == 0, right?
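A minimal sketch of that suggestion, reusing the loop body and names from the diff above (`stat_exprs`, `column`, `data_type`, `label`, and `min_count` are assumed to be in scope inside the method): emit the count helper only when `min_count > 0` will actually read it.

```python
from pyspark.sql import functions as F
from pyspark.sql.types import IntegralType

prod_col = F.product(column)
if isinstance(data_type, IntegralType):
    # Spark's product() returns a double; cast back for integral columns.
    prod_col = prod_col.cast(data_type)
stat_exprs.append(prod_col.alias(str(label[0])))

# The count is consulted only by the min_count masking step, so the
# helper column can be skipped entirely for the default min_count == 0.
if min_count > 0:
    stat_exprs.append(F.count(column).alias("{}_count".format(label[0])))
```

(Spelled with `str.format` rather than f-strings here, anticipating the nit further down.)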
##########
python/pyspark/pandas/groupby.py:
##########
@@ -993,6 +993,98 @@ def nth(self, n: int) -> FrameLike:
+        >>> df.groupby('A').prod().sort_index()
+              B  C   D
+        A
+        1   8.0  2   0
+        2  15.0  2  11

Review Comment:
```suggestion
        2  15.0  2   1
```

##########
python/pyspark/pandas/groupby.py:
##########
@@ -993,6 +993,98 @@ def nth(self, n: int) -> FrameLike:
+        numeric_only : bool, default False
+            Include only float, int, boolean columns. If None, will attempt to use
+            everything, then use only numeric data.

Review Comment:
```suggestion
        numeric_only : bool, default False
            Include only float, int, boolean columns. If None, will attempt to use
            everything, then use only numeric data.
```

##########
python/pyspark/pandas/groupby.py:
##########
@@ -993,6 +993,98 @@ def nth(self, n: int) -> FrameLike:
+        min_count: int, default 0
+            The required number of valid values to perform the operation.
+            If fewer than min_count non-NA values are present the result will be NA.

Review Comment:
```suggestion
        min_count: int, default 0
            The required number of valid values to perform the operation.
            If fewer than min_count non-NA values are present the result will be NA.
```

##########
python/pyspark/pandas/groupby.py:
##########
@@ -993,6 +993,98 @@ def nth(self, n: int) -> FrameLike:
+        .. versionadded:: 3.4.0

Review Comment:
   You might want to move this before `Parameters`.

##########
python/pyspark/pandas/groupby.py:
##########
@@ -993,6 +993,98 @@ def nth(self, n: int) -> FrameLike:
+            sdf = sdf.groupby(*groupkey_names).agg(*stat_exprs)
+
+            if min_count > 0:
+                for label in psdf._internal.column_labels:
+                    sdf = sdf.withColumn(f"{label[0]}", F.when(F.col(f"{label[0]}_count").__ge__(min_count),

Review Comment:
   We usually create a temporary column with a special name, verify it (`verify_temp_column_name`), and then drop it.
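A rough sketch of that convention, modeled on the linked `sum()` code; the name `__prod_count__`, the toy frame, and the single column are illustrative assumptions, not the PR's actual code:

```python
from pyspark.sql import SparkSession, functions as F
from pyspark.pandas.utils import verify_temp_column_name

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame([(1, 2.0), (1, 4.0), (2, 3.0)], ["A", "B"])

# Reserve a collision-checked internal name instead of deriving
# "<label>_count" from user-facing column labels.
tmp_count = verify_temp_column_name(sdf, "__prod_count__")

agg = sdf.groupby("A").agg(
    F.product(F.col("B")).alias("B"),
    F.count(F.col("B")).alias(tmp_count),
)

min_count = 2
# Mask groups with too few valid values, then drop the helper column so
# it never surfaces in the result.
result = agg.withColumn(
    "B", F.when(F.col(tmp_count) >= min_count, F.col("B"))
).drop(tmp_count)
```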
   See below as reference: https://github.com/apache/spark/blob/master/python/pyspark/pandas/groupby.py#L1024-L1032

##########
python/pyspark/pandas/groupby.py:
##########
@@ -993,6 +993,98 @@ def nth(self, n: int) -> FrameLike:
+                if isinstance(data_type, IntegralType):
+                    stat_exprs.append(F.product(column).cast(data_type).alias(f"{label[0]}"))

Review Comment:
   nit:
   > let's don't use f-format. we dropped Python 3.5 so it's technically fine but I think it's better to don't break that support only because of this one string format.

   https://github.com/apache/spark/pull/36509#discussion_r874263076
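Concretely, a sketch of what the nit asks for (not the PR's final code): the flagged alias strings rewritten without f-strings, which produce identical results.

```python
# f-string style flagged by the nit, and the Python-3.5-compatible
# equivalents; both pairs yield the same alias strings.
label = ("B",)  # hypothetical column label tuple

assert f"{label[0]}" == str(label[0])
assert f"{label[0]}_count" == "{}_count".format(label[0])
```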