[ https://issues.apache.org/jira/browse/SPARK-41847?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17676874#comment-17676874 ]

Apache Spark commented on SPARK-41847:
--------------------------------------

User 'zhengruifeng' has created a pull request for this issue:
https://github.com/apache/spark/pull/39568

> DataFrame mapfield,structlist invalid type
> ------------------------------------------
>
>                 Key: SPARK-41847
>                 URL: https://issues.apache.org/jira/browse/SPARK-41847
>             Project: Spark
>          Issue Type: Sub-task
>          Components: Connect
>    Affects Versions: 3.4.0
>            Reporter: Sandeep Singh
>            Assignee: Ruifeng Zheng
>            Priority: Major
>             Fix For: 3.4.0
>
>
> {code:python}
> File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/functions.py", line 1270, in pyspark.sql.connect.functions.explode
> Failed example:
>     eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
> Exception raised:
>     Traceback (most recent call last):
>       File "/usr/local/Cellar/python@3.10/3.10.8/Frameworks/Python.framework/Versions/3.10/lib/python3.10/doctest.py", line 1350, in __run
>         exec(compile(example.source, filename, "single",
>       File "<doctest pyspark.sql.connect.functions.explode[3]>", line 1, in <module>
>         eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 534, in show
>         print(self._show_string(n, truncate, vertical))
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 423, in _show_string
>         ).toPandas()
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1031, in toPandas
>         return self._session.client.to_pandas(query)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 413, in to_pandas
>         return self._execute_and_fetch(req)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 573, in _execute_and_fetch
>         self._handle_error(rpc_error)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 619, in _handle_error
>         raise SparkConnectAnalysisException(
>     pyspark.sql.connect.client.SparkConnectAnalysisException: [INVALID_COLUMN_OR_FIELD_DATA_TYPE] Column or field `mapfield` is of type "STRUCT<a: STRING>" while it's required to be "MAP<STRING, STRING>".
>     Plan:
> {code}
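>
> This failure is not specific to the doctest runner. A minimal hand-run sketch, assuming the same setup as the `explode` doctest (a `mapfield` column built from a plain Python dict) and an already-created Spark Connect session `spark`, would look roughly like this:
> {code:python}
> from pyspark.sql import Row
> from pyspark.sql.connect.functions import explode
>
> # Assumed setup mirroring the doctest: `spark` is a Spark Connect session.
> eDF = spark.createDataFrame([Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"})])
>
> # Expected: mapfield is MAP<STRING, STRING>; the analysis error above suggests
> # the dict is instead carried over as STRUCT<a: STRING>.
> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
> {code}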
> {code:python}
> File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/functions.py", line 1364, in pyspark.sql.connect.functions.inline
> Failed example:
>     df.select(inline(df.structlist)).show()
> Exception raised:
>     Traceback (most recent call last):
>       File "/usr/local/Cellar/python@3.10/3.10.8/Frameworks/Python.framework/Versions/3.10/lib/python3.10/doctest.py", line 1350, in __run
>         exec(compile(example.source, filename, "single",
>       File "<doctest pyspark.sql.connect.functions.inline[2]>", line 1, in <module>
>         df.select(inline(df.structlist)).show()
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 534, in show
>         print(self._show_string(n, truncate, vertical))
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 423, in _show_string
>         ).toPandas()
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1031, in toPandas
>         return self._session.client.to_pandas(query)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 413, in to_pandas
>         return self._execute_and_fetch(req)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 573, in _execute_and_fetch
>         self._handle_error(rpc_error)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 619, in _handle_error
>         raise SparkConnectAnalysisException(
>     pyspark.sql.connect.client.SparkConnectAnalysisException: [INVALID_COLUMN_OR_FIELD_DATA_TYPE] Column or field `structlist`.`element` is of type "ARRAY<BIGINT>" while it's required to be "STRUCT<a: BIGINT, b: BIGINT>".
>     Plan:
> {code}
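>
> The `inline` case looks like the same schema problem on a list of structs. A rough reproduction, assuming the standard `inline` doctest setup (a `structlist` column built from a list of `Row` objects) and a Spark Connect session `spark`:
> {code:python}
> from pyspark.sql import Row
> from pyspark.sql.connect.functions import inline
>
> # Assumed setup mirroring the doctest: `spark` is a Spark Connect session.
> df = spark.createDataFrame([Row(structlist=[Row(a=1, b=2), Row(a=3, b=4)])])
>
> # Expected element type: STRUCT<a: BIGINT, b: BIGINT>; the error above reports
> # the element as ARRAY<BIGINT> instead.
> df.select(inline(df.structlist)).show()
> {code}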
> {code:python}
> File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/functions.py", line 1411, in pyspark.sql.connect.functions.map_filter
> Failed example:
>     df.select(map_filter(
>         "data", lambda _, v: v > 30.0).alias("data_filtered")
>     ).show(truncate=False)
> Exception raised:
>     Traceback (most recent call last):
>       File "/usr/local/Cellar/python@3.10/3.10.8/Frameworks/Python.framework/Versions/3.10/lib/python3.10/doctest.py", line 1350, in __run
>         exec(compile(example.source, filename, "single",
>       File "<doctest pyspark.sql.connect.functions.map_filter[1]>", line 1, in <module>
>         df.select(map_filter(
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 534, in show
>         print(self._show_string(n, truncate, vertical))
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 423, in _show_string
>         ).toPandas()
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1031, in toPandas
>         return self._session.client.to_pandas(query)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 413, in to_pandas
>         return self._execute_and_fetch(req)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 573, in _execute_and_fetch
>         self._handle_error(rpc_error)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 619, in _handle_error
>         raise SparkConnectAnalysisException(
>     pyspark.sql.connect.client.SparkConnectAnalysisException: [DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE] Cannot resolve "map_filter(data, lambdafunction(`>`(y_9, 30.0), x_8, y_9))" due to data type mismatch: Parameter 1 requires the "MAP" type, however "data" has the type "STRUCT<bar: DOUBLE, baz: DOUBLE, foo: DOUBLE>".
>     Plan: 'Project [map_filter(data#3499, lambdafunction('`>`(lambda 'y_9, 30.0), lambda 'x_8, lambda 'y_9, false)) AS data_filtered#3502]
>     +- Project [0#3494L AS id#3498L, 1#3495 AS data#3499]
>        +- LocalRelation [0#3494L, 1#3495]
> **********************************************************************
> File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/functions.py", line 1453, in pyspark.sql.connect.functions.map_zip_with
> Failed example:
>     df.select(map_zip_with(
>         "base", "ratio", lambda k, v1, v2: round(v1 * v2, 2)).alias("updated_data")
>     ).show(truncate=False)
> Exception raised:
>     Traceback (most recent call last):
>       File "/usr/local/Cellar/python@3.10/3.10.8/Frameworks/Python.framework/Versions/3.10/lib/python3.10/doctest.py", line 1350, in __run
>         exec(compile(example.source, filename, "single",
>       File "<doctest pyspark.sql.connect.functions.map_zip_with[1]>", line 1, in <module>
>         df.select(map_zip_with(
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 534, in show
>         print(self._show_string(n, truncate, vertical))
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 423, in _show_string
>         ).toPandas()
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1031, in toPandas
>         return self._session.client.to_pandas(query)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 413, in to_pandas
>         return self._execute_and_fetch(req)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 573, in _execute_and_fetch
>         self._handle_error(rpc_error)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 619, in _handle_error
>         raise SparkConnectAnalysisException(
>     pyspark.sql.connect.client.SparkConnectAnalysisException: [DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE] Cannot resolve "map_zip_with(base, ratio, lambdafunction(round(`*`(y_11, z_12), 2), x_10, y_11, z_12))" due to data type mismatch: Parameter 1 requires the "MAP" type, however "base" has the type "STRUCT<IT: DOUBLE, SALES: DOUBLE>".
>     Plan: 'Project [map_zip_with(base#3573, ratio#3574, lambdafunction('round('`*`(lambda 'y_11, lambda 'z_12), 2), lambda 'x_10, lambda 'y_11, lambda 'z_12, false)) AS updated_data#3578]
>     +- Project [0#3566L AS id#3572L, 1#3567 AS base#3573, 2#3568 AS ratio#3574]
>        +- LocalRelation [0#3566L, 1#3567, 2#3568]
> {code}
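>
> Both higher-order-function failures above appear to share the same cause: a column created from a Python dict reaches the analyzer as a struct rather than a map. A hedged sketch of the `map_filter` case, with the data values assumed from the doctest and `spark` assumed to be a Spark Connect session:
> {code:python}
> from pyspark.sql.connect.functions import map_filter
>
> # Assumed setup mirroring the doctest: `spark` is a Spark Connect session.
> df = spark.createDataFrame(
>     [(1, {"foo": 42.0, "bar": 1.0, "baz": 32.0})], ("id", "data")
> )
>
> # Expected: "data" is MAP<STRING, DOUBLE>; the error above reports
> # STRUCT<bar: DOUBLE, baz: DOUBLE, foo: DOUBLE> instead.
> df.select(
>     map_filter("data", lambda _, v: v > 30.0).alias("data_filtered")
> ).show(truncate=False)
> {code}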
> {code:python}
> File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/functions.py", line 1263, in pyspark.sql.connect.functions.element_at
> Failed example:
>     df.select(element_at(df.data, lit("a"))).collect()
> Exception raised:
>     Traceback (most recent call last):
>       File "/usr/local/Cellar/python@3.10/3.10.8/Frameworks/Python.framework/Versions/3.10/lib/python3.10/doctest.py", line 1350, in __run
>         exec(compile(example.source, filename, "single",
>       File "<doctest pyspark.sql.connect.functions.element_at[4]>", line 1, in <module>
>         df.select(element_at(df.data, lit("a"))).collect()
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1017, in collect
>         pdf = self.toPandas()
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/dataframe.py", line 1031, in toPandas
>         return self._session.client.to_pandas(query)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 413, in to_pandas
>         return self._execute_and_fetch(req)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 573, in _execute_and_fetch
>         self._handle_error(rpc_error)
>       File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/connect/client.py", line 619, in _handle_error
>         raise SparkConnectAnalysisException(
>     pyspark.sql.connect.client.SparkConnectAnalysisException: [DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE] Cannot resolve "element_at(data, a)" due to data type mismatch: Parameter 1 requires the ("ARRAY" or "MAP") type, however "data" has the type "STRUCT<a: DOUBLE, b: DOUBLE>".
>     Plan: 'Project [unresolvedalias(element_at(data#2393, a, None, false), None)]
>     +- Project [0#2391 AS data#2393]
>        +- LocalRelation [0#2391]
> {code}
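>
> The `element_at` failure follows the same pattern on a single map column. A minimal sketch, with the input data assumed from the doctest and `spark` assumed to be a Spark Connect session:
> {code:python}
> from pyspark.sql.connect.functions import element_at, lit
>
> # Assumed setup mirroring the doctest: `spark` is a Spark Connect session.
> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},)], ["data"])
>
> # Expected: "data" is MAP<STRING, DOUBLE>; the error above reports
> # STRUCT<a: DOUBLE, b: DOUBLE> instead.
> df.select(element_at(df.data, lit("a"))).collect()
> {code}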


