Repository: spark
Updated Branches:
  refs/heads/master 8df1da396 -> 13190a4f6


[SPARK-22874][PYSPARK][SQL] Modify the pandas version check to use LooseVersion.

## What changes were proposed in this pull request?

Currently we check the pandas version by catching whether an `ImportError` is 
raised for version-specific imports, but we can instead compare `LooseVersion`s 
of the version strings, the same way we already check the pyarrow version.
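
For illustration, the check reduces to a `LooseVersion` comparison like the 
self-contained sketch below, mirroring the new `require_minimum_pandas_version` 
helper added to `python/pyspark/sql/utils.py` in this diff:

```python
from distutils.version import LooseVersion

import pandas

# LooseVersion compares version components numerically, so "0.9.0" is
# correctly treated as older than "0.19.2" (a plain string comparison
# would get this backwards, since "9" > "1" lexicographically).
if LooseVersion(pandas.__version__) < LooseVersion("0.19.2"):
    raise ImportError("Pandas >= 0.19.2 must be installed on calling Python process")
```

Compared with catching an `ImportError` from a version-specific import, this 
fails fast with an explicit minimum-version message.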

## How was this patch tested?

Existing tests.
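
(For reference, the relevant suite can be run locally with something like 
`./python/run-tests --modules=pyspark-sql`, assuming a standard Spark 
development setup.)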

Author: Takuya UESHIN <ues...@databricks.com>

Closes #20054 from ueshin/issues/SPARK-22874.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/13190a4f
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/13190a4f
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/13190a4f

Branch: refs/heads/master
Commit: 13190a4f60c081a68812df6df1d8262779cd6fcb
Parents: 8df1da3
Author: Takuya UESHIN <ues...@databricks.com>
Authored: Fri Dec 22 20:09:51 2017 +0900
Committer: hyukjinkwon <gurwls...@gmail.com>
Committed: Fri Dec 22 20:09:51 2017 +0900

----------------------------------------------------------------------
 python/pyspark/sql/dataframe.py |  4 ++--
 python/pyspark/sql/session.py   | 15 +++++++--------
 python/pyspark/sql/tests.py     |  7 ++++---
 python/pyspark/sql/types.py     | 33 +++++++++++++--------------------
 python/pyspark/sql/udf.py       |  4 ++--
 python/pyspark/sql/utils.py     | 11 ++++++++++-
 6 files changed, 38 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/13190a4f/python/pyspark/sql/dataframe.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index 440684d..95eca76 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -1906,9 +1906,9 @@ class DataFrame(object):
         if self.sql_ctx.getConf("spark.sql.execution.arrow.enabled", "false").lower() == "true":
             try:
                 from pyspark.sql.types import _check_dataframe_localize_timestamps
-                from pyspark.sql.utils import _require_minimum_pyarrow_version
+                from pyspark.sql.utils import require_minimum_pyarrow_version
                 import pyarrow
-                _require_minimum_pyarrow_version()
+                require_minimum_pyarrow_version()
                 tables = self._collectAsArrow()
                 if tables:
                     table = pyarrow.concat_tables(tables)

http://git-wip-us.apache.org/repos/asf/spark/blob/13190a4f/python/pyspark/sql/session.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/session.py b/python/pyspark/sql/session.py
index 86db16e..6e5eec4 100644
--- a/python/pyspark/sql/session.py
+++ b/python/pyspark/sql/session.py
@@ -493,15 +493,14 @@ class SparkSession(object):
         data types will be used to coerce the data in Pandas to Arrow conversion.
         """
         from pyspark.serializers import ArrowSerializer, _create_batch
-        from pyspark.sql.types import from_arrow_schema, to_arrow_type, \
-            _old_pandas_exception_message, TimestampType
-        from pyspark.sql.utils import _require_minimum_pyarrow_version
-        try:
-            from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
-        except ImportError as e:
-            raise ImportError(_old_pandas_exception_message(e))
+        from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
+        from pyspark.sql.utils import require_minimum_pandas_version, \
+            require_minimum_pyarrow_version
+
+        require_minimum_pandas_version()
+        require_minimum_pyarrow_version()
 
-        _require_minimum_pyarrow_version()
+        from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
 
         # Determine arrow types to coerce data when creating batches
         if isinstance(schema, StructType):

http://git-wip-us.apache.org/repos/asf/spark/blob/13190a4f/python/pyspark/sql/tests.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index 6fdfda1..b977160 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -53,7 +53,8 @@ _have_old_pandas = False
 try:
     import pandas
     try:
-        import pandas.api
+        from pyspark.sql.utils import require_minimum_pandas_version
+        require_minimum_pandas_version()
         _have_pandas = True
     except:
         _have_old_pandas = True
@@ -2600,7 +2601,7 @@ class SQLTests(ReusedSQLTestCase):
     @unittest.skipIf(not _have_old_pandas, "Old Pandas not installed")
     def test_to_pandas_old(self):
         with QuietTest(self.sc):
-            with self.assertRaisesRegexp(ImportError, 'Pandas \(.*\) must be installed'):
+            with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
                 self._to_pandas()
 
     @unittest.skipIf(not _have_pandas, "Pandas not installed")
@@ -2643,7 +2644,7 @@ class SQLTests(ReusedSQLTestCase):
         pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
                             "d": [pd.Timestamp.now().date()]})
         with QuietTest(self.sc):
-            with self.assertRaisesRegexp(ImportError, 'Pandas \(.*\) must be installed'):
+            with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
                 self.spark.createDataFrame(pdf)
 
 

http://git-wip-us.apache.org/repos/asf/spark/blob/13190a4f/python/pyspark/sql/types.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/types.py b/python/pyspark/sql/types.py
index 46d9a41..063264a 100644
--- a/python/pyspark/sql/types.py
+++ b/python/pyspark/sql/types.py
@@ -1678,13 +1678,6 @@ def from_arrow_schema(arrow_schema):
          for field in arrow_schema])
 
 
-def _old_pandas_exception_message(e):
-    """ Create an error message for importing old Pandas.
-    """
-    msg = "note: Pandas (>=0.19.2) must be installed and available on calling 
Python process"
-    return "%s\n%s" % (_exception_message(e), msg)
-
-
 def _check_dataframe_localize_timestamps(pdf, timezone):
     """
     Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone
@@ -1693,10 +1686,10 @@ def _check_dataframe_localize_timestamps(pdf, timezone):
     :param timezone: the timezone to convert. if None then use local timezone
     :return pandas.DataFrame where any timezone aware columns have been converted to tz-naive
     """
-    try:
-        from pandas.api.types import is_datetime64tz_dtype
-    except ImportError as e:
-        raise ImportError(_old_pandas_exception_message(e))
+    from pyspark.sql.utils import require_minimum_pandas_version
+    require_minimum_pandas_version()
+
+    from pandas.api.types import is_datetime64tz_dtype
     tz = timezone or 'tzlocal()'
     for column, series in pdf.iteritems():
         # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
@@ -1714,10 +1707,10 @@ def _check_series_convert_timestamps_internal(s, timezone):
     :param timezone: the timezone to convert. if None then use local timezone
     :return pandas.Series where if it is a timestamp, has been UTC normalized without a time zone
     """
-    try:
-        from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
-    except ImportError as e:
-        raise ImportError(_old_pandas_exception_message(e))
+    from pyspark.sql.utils import require_minimum_pandas_version
+    require_minimum_pandas_version()
+
+    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
     # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
     if is_datetime64_dtype(s.dtype):
         tz = timezone or 'tzlocal()'
@@ -1737,11 +1730,11 @@ def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
     :param to_timezone: the timezone to convert to. if None then use local timezone
     :return pandas.Series where if it is a timestamp, has been converted to tz-naive
     """
-    try:
-        import pandas as pd
-        from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
-    except ImportError as e:
-        raise ImportError(_old_pandas_exception_message(e))
+    from pyspark.sql.utils import require_minimum_pandas_version
+    require_minimum_pandas_version()
+
+    import pandas as pd
+    from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
     from_tz = from_timezone or 'tzlocal()'
     to_tz = to_timezone or 'tzlocal()'
     # TODO: handle nested timestamps, such as ArrayType(TimestampType())?

http://git-wip-us.apache.org/repos/asf/spark/blob/13190a4f/python/pyspark/sql/udf.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/udf.py b/python/pyspark/sql/udf.py
index 50c87ba..1231381 100644
--- a/python/pyspark/sql/udf.py
+++ b/python/pyspark/sql/udf.py
@@ -37,9 +37,9 @@ def _create_udf(f, returnType, evalType):
     if evalType == PythonEvalType.SQL_PANDAS_SCALAR_UDF or \
             evalType == PythonEvalType.SQL_PANDAS_GROUP_MAP_UDF:
         import inspect
-        from pyspark.sql.utils import _require_minimum_pyarrow_version
+        from pyspark.sql.utils import require_minimum_pyarrow_version
 
-        _require_minimum_pyarrow_version()
+        require_minimum_pyarrow_version()
         argspec = inspect.getargspec(f)
 
         if evalType == PythonEvalType.SQL_PANDAS_SCALAR_UDF and len(argspec.args) == 0 and \

http://git-wip-us.apache.org/repos/asf/spark/blob/13190a4f/python/pyspark/sql/utils.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/utils.py b/python/pyspark/sql/utils.py
index cc7dabb..fb7d42a 100644
--- a/python/pyspark/sql/utils.py
+++ b/python/pyspark/sql/utils.py
@@ -112,7 +112,16 @@ def toJArray(gateway, jtype, arr):
     return jarr
 
 
-def _require_minimum_pyarrow_version():
+def require_minimum_pandas_version():
+    """ Raise ImportError if minimum version of Pandas is not installed
+    """
+    from distutils.version import LooseVersion
+    import pandas
+    if LooseVersion(pandas.__version__) < LooseVersion('0.19.2'):
+        raise ImportError("Pandas >= 0.19.2 must be installed on calling 
Python process")
+
+
+def require_minimum_pyarrow_version():
     """ Raise ImportError if minimum version of pyarrow is not installed
     """
     from distutils.version import LooseVersion

