This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
     new 88bba0c  [SPARK-36697][PYTHON] Fix dropping all columns of a DataFrame
88bba0c is described below

commit 88bba0c94be6946add41c5ea8510b912133205e9
Author: Xinrong Meng <xinrong.m...@databricks.com>
AuthorDate: Thu Sep 9 09:59:42 2021 +0900

    [SPARK-36697][PYTHON] Fix dropping all columns of a DataFrame
    
    ### What changes were proposed in this pull request?
    Fix dropping all columns of a DataFrame
    
    ### Why are the changes needed?
    When dropping all columns of a pandas-on-Spark DataFrame, a ValueError is raised.
    Whereas in pandas, an empty DataFrame preserving the index is returned.
    We should follow pandas.
    
    ### Does this PR introduce _any_ user-facing change?
    Yes.
    
    From
    ```py
    >>> psdf = ps.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]})
    >>> psdf
       x  y  z
    0  1  3  5
    1  2  4  6
    
    >>> psdf.drop(['x', 'y', 'z'])
    Traceback (most recent call last):
    ...
    ValueError: not enough values to unpack (expected 2, got 0)
    
    ```
    To
    ```py
    >>> psdf = ps.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]})
    >>> psdf
       x  y  z
    0  1  3  5
    1  2  4  6
    
    >>> psdf.drop(['x', 'y', 'z'])
    Empty DataFrame
    Columns: []
    Index: [0, 1]
    ```
    
    ### How was this patch tested?
    Unit tests.
    
    Closes #33938 from xinrong-databricks/frame_drop_col.
    
    Authored-by: Xinrong Meng <xinrong.m...@databricks.com>
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
    (cherry picked from commit 33bb7b39e927ee8c357fd4f8c073be6f3a1d2fb0)
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
---
 python/pyspark/pandas/frame.py                | 17 ++++++++++-------
 python/pyspark/pandas/tests/test_dataframe.py | 13 +++++++++++++
 2 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/python/pyspark/pandas/frame.py b/python/pyspark/pandas/frame.py
index 48be138..ec6b261 100644
--- a/python/pyspark/pandas/frame.py
+++ b/python/pyspark/pandas/frame.py
@@ -6726,14 +6726,17 @@ defaultdict(<class 'list'>, {'col..., 'col...})]
             )
             if len(drop_column_labels) == 0:
                 raise KeyError(columns)
-            cols, labels = zip(
-                *(
-                    (column, label)
-                    for column, label in zip(
-                        self._internal.data_spark_column_names, self._internal.column_labels
-                    )
-                    if label not in drop_column_labels
+
+            keep_columns_and_labels = [
+                (column, label)
+                for column, label in zip(
+                    self._internal.data_spark_column_names, self._internal.column_labels
                 )
+                if label not in drop_column_labels
+            ]
+
+            cols, labels = (
+                zip(*keep_columns_and_labels) if len(keep_columns_and_labels) > 0 else ([], [])
             )
             internal = self._internal.with_new_columns([self._psser_for(label) for label in labels])
             return DataFrame(internal)
diff --git a/python/pyspark/pandas/tests/test_dataframe.py b/python/pyspark/pandas/tests/test_dataframe.py
index 5fe1165..11da18c 100644
--- a/python/pyspark/pandas/tests/test_dataframe.py
+++ b/python/pyspark/pandas/tests/test_dataframe.py
@@ -1263,9 +1263,11 @@ class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
         self.assert_eq(psdf.drop("x"), pdf.drop("x", axis=1))
         # Assert using a list for 'labels' works
         self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
+        self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
         # Assert using 'columns' instead of 'labels' produces the same results
         self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
         self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
+        self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
 
         # Assert 'labels' being used when both 'labels' and 'columns' are specified
         # TODO: should throw an error?
@@ -1279,10 +1281,21 @@ class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
         self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
         self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
         self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
+        self.assert_eq(
+            psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
+            pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
+        )
 
         self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
         self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
 
+        pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
+        psdf = ps.from_pandas(pdf)
+        self.assert_eq(
+            psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
+            pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
+        )
+
         # non-string names
         pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
         psdf = ps.from_pandas(pdf)

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to