Script 'mail_helper' called by obssrc
Hello community,

Here is the log from the commit of package python-geopandas for
openSUSE:Factory, checked in at 2024-05-06 17:53:19
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-geopandas (Old)
 and      /work/SRC/openSUSE:Factory/.python-geopandas.new.1880 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-geopandas"

Mon May  6 17:53:19 2024 rev:9 rq:1171986 version:0.14.4

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-geopandas/python-geopandas.changes        
2024-03-22 15:32:48.099810812 +0100
+++ 
/work/SRC/openSUSE:Factory/.python-geopandas.new.1880/python-geopandas.changes  
    2024-05-06 17:53:36.735870963 +0200
@@ -1,0 +2,7 @@
+Sun May  5 10:59:24 UTC 2024 - Ben Greiner <c...@bnavigator.de>
+
+- Update to 0.14.4
+  * Several fixes for compatibility with the upcoming pandas 3.0,
+    numpy 2.0 and fiona 1.10 releases.
+
+-------------------------------------------------------------------

Old:
----
  geopandas-0.14.3.tar.gz

New:
----
  geopandas-0.14.4.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-geopandas.spec ++++++
--- /var/tmp/diff_new_pack.YvynMJ/_old  2024-05-06 17:53:37.299891533 +0200
+++ /var/tmp/diff_new_pack.YvynMJ/_new  2024-05-06 17:53:37.299891533 +0200
@@ -25,12 +25,13 @@
 %bcond_with test
 %endif
 Name:           python-geopandas%{psuffix}
-Version:        0.14.3
+Version:        0.14.4
 Release:        0
 Summary:        Geographic pandas extensions
 License:        BSD-3-Clause
 Group:          Development/Languages/Python
 URL:            https://geopandas.org
+# SourceRepository: https://github.com/geopandas/geopandas
 Source:         
https://files.pythonhosted.org/packages/source/g/geopandas/geopandas-%{version}.tar.gz
 BuildRequires:  %{python_module base >= 3.9}
 BuildRequires:  %{python_module pip}
@@ -40,6 +41,7 @@
 BuildRequires:  python-rpm-macros
 Requires:       proj
 Requires:       python-Fiona >= 1.8.21
+Requires:       python-numpy >= 1.22
 Requires:       python-packaging
 Requires:       python-pandas >= 1.4.0
 Requires:       python-pyproj >= 3.3.0
@@ -54,6 +56,7 @@
 BuildRequires:  %{python_module geopandas = %{version}}
 BuildRequires:  %{python_module geopy}
 BuildRequires:  %{python_module matplotlib >= 3.5.0}
+BuildRequires:  %{python_module numpy >= 1.22}
 BuildRequires:  %{python_module psycopg2}
 BuildRequires:  %{python_module pyarrow}
 BuildRequires:  %{python_module pytest}

++++++ geopandas-0.14.3.tar.gz -> geopandas-0.14.4.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/PKG-INFO 
new/geopandas-0.14.4/PKG-INFO
--- old/geopandas-0.14.3/PKG-INFO       2024-01-31 20:21:48.239030100 +0100
+++ new/geopandas-0.14.4/PKG-INFO       2024-04-28 15:49:18.152525000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: geopandas
-Version: 0.14.3
+Version: 0.14.4
 Summary: Geographic pandas extensions
 Author-email: Kelsey Jordahl <kjord...@alum.mit.edu>
 Maintainer: GeoPandas contributors
@@ -19,6 +19,7 @@
 Description-Content-Type: text/x-rst
 License-File: LICENSE.txt
 Requires-Dist: fiona>=1.8.21
+Requires-Dist: numpy>=1.22
 Requires-Dist: packaging
 Requires-Dist: pandas>=1.4.0
 Requires-Dist: pyproj>=3.3.0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/_compat.py 
new/geopandas-0.14.4/geopandas/_compat.py
--- old/geopandas-0.14.3/geopandas/_compat.py   2024-01-31 20:21:34.000000000 
+0100
+++ new/geopandas-0.14.4/geopandas/_compat.py   2024-04-28 15:49:10.000000000 
+0200
@@ -18,7 +18,8 @@
 PANDAS_GE_15 = Version(pd.__version__) >= Version("1.5.0")
 PANDAS_GE_20 = Version(pd.__version__) >= Version("2.0.0")
 PANDAS_GE_21 = Version(pd.__version__) >= Version("2.1.0")
-PANDAS_GE_22 = Version(pd.__version__) >= Version("2.2.0.dev0")
+PANDAS_GE_22 = Version(pd.__version__) >= Version("2.2.0")
+PANDAS_GE_30 = Version(pd.__version__) >= Version("3.0.0.dev0")
 
 
 # -----------------------------------------------------------------------------
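
The flags above are plain module-level booleans keyed on PEP 440 version ordering: "3.0.0.dev0" sorts before "3.0.0", so the new PANDAS_GE_30 flag also switches on for pandas 3.0 nightlies, while the tightened PANDAS_GE_22 check now only matches final 2.2 releases. A minimal sketch of how a caller might gate on such a flag (the helper below is illustrative, not geopandas API):

    import pandas as pd
    from packaging.version import Version

    PANDAS_GE_30 = Version(pd.__version__) >= Version("3.0.0.dev0")

    def copy_frame(frame):
        # hypothetical helper: under pandas 3.0 Copy-on-Write a shallow copy
        # is already safe to modify, so the deep copy can be skipped
        return frame.copy(deep=False) if PANDAS_GE_30 else frame.copy()
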
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/_version.py 
new/geopandas-0.14.4/geopandas/_version.py
--- old/geopandas-0.14.3/geopandas/_version.py  2024-01-31 20:21:48.239030100 
+0100
+++ new/geopandas-0.14.4/geopandas/_version.py  2024-04-28 15:49:18.152525000 
+0200
@@ -8,11 +8,11 @@
 
 version_json = '''
 {
- "date": "2024-01-31T20:20:12+0100",
+ "date": "2024-04-28T15:48:09+0200",
  "dirty": false,
  "error": null,
- "full-revisionid": "5558c35297a537b05675d236ee550612460299ec",
- "version": "0.14.3"
+ "full-revisionid": "60c9773e44fff8a35344c2a74431e00c5546a4ee",
+ "version": "0.14.4"
 }
 '''  # END VERSION_JSON
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/array.py 
new/geopandas-0.14.4/geopandas/array.py
--- old/geopandas-0.14.3/geopandas/array.py     2024-01-31 20:21:34.000000000 
+0100
+++ new/geopandas-0.14.4/geopandas/array.py     2024-04-28 15:49:10.000000000 
+0200
@@ -1014,14 +1014,19 @@
             # TODO with numpy >= 1.15, the 'initial' argument can be used
             return np.array([np.nan, np.nan, np.nan, np.nan])
         b = self.bounds
-        return np.array(
-            (
-                np.nanmin(b[:, 0]),  # minx
-                np.nanmin(b[:, 1]),  # miny
-                np.nanmax(b[:, 2]),  # maxx
-                np.nanmax(b[:, 3]),  # maxy
+        with warnings.catch_warnings():
+            # if all rows are empty geometry / none, nan is expected
+            warnings.filterwarnings(
+                "ignore", r"All-NaN slice encountered", RuntimeWarning
+            )
+            return np.array(
+                (
+                    np.nanmin(b[:, 0]),  # minx
+                    np.nanmin(b[:, 1]),  # miny
+                    np.nanmax(b[:, 2]),  # maxx
+                    np.nanmax(b[:, 3]),  # maxy
+                )
             )
-        )
 
     # -------------------------------------------------------------------------
     # general array like compat
@@ -1159,7 +1164,13 @@
                 return pd.array(string_values, dtype=pd_dtype)
             return string_values.astype(dtype, copy=False)
         else:
-            return np.array(self, dtype=dtype, copy=copy)
+            # numpy 2.0 makes copy=False case strict (errors if cannot avoid 
the copy)
+            # -> in that case use `np.asarray` as backwards compatible 
alternative
+            # for `copy=None` (when requiring numpy 2+, this can be cleaned up)
+            if not copy:
+                return np.asarray(self, dtype=dtype)
+            else:
+                return np.array(self, dtype=dtype, copy=copy)
 
     def isna(self):
         """
@@ -1469,7 +1480,7 @@
             f"does not support reduction '{name}'"
         )
 
-    def __array__(self, dtype=None):
+    def __array__(self, dtype=None, copy=None):
         """
         The numpy array interface.
 
@@ -1477,7 +1488,11 @@
         -------
         values : numpy array
         """
-        return to_shapely(self)
+        if compat.USE_PYGEOS:
+            return to_shapely(self)
+        if copy and (dtype is None or dtype == np.dtype("object")):
+            return self._data.copy()
+        return self._data
 
     def _binop(self, other, op):
         def convert_values(param):
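
The copy-handling hunks above track a NumPy 2.0 behaviour change: np.array(..., copy=False) now raises if the conversion cannot avoid a copy, while np.asarray keeps the old "copy only if needed" semantics on both NumPy 1.x and 2.x. A hedged sketch of the pattern (the function name is illustrative):

    import numpy as np

    def as_ndarray(values, dtype=None, copy=True):
        if not copy:
            # copy only when the conversion requires it (NumPy 1.x copy=False behaviour)
            return np.asarray(values, dtype=dtype)
        return np.array(values, dtype=dtype, copy=True)
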
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/base.py 
new/geopandas-0.14.4/geopandas/base.py
--- old/geopandas-0.14.3/geopandas/base.py      2024-01-31 20:21:34.000000000 
+0100
+++ new/geopandas-0.14.4/geopandas/base.py      2024-04-28 15:49:10.000000000 
+0200
@@ -4467,7 +4467,7 @@
             index_arrays.append(inner_index)
 
             index = pd.MultiIndex.from_arrays(
-                index_arrays, names=orig_idx.names + [None]
+                index_arrays, names=list(orig_idx.names) + [None]
             )
 
         else:
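
The one-line change above wraps Index.names in list() before appending, so the concatenation no longer depends on the exact sequence type pandas returns for Index.names (FrozenList today, possibly something stricter in future releases). A small self-contained illustration with made-up data:

    import pandas as pd

    idx = pd.MultiIndex.from_arrays([[1, 1, 2], ["a", "b", "a"]], names=["grp", "sub"])
    names = list(idx.names) + [None]          # ['grp', 'sub', None]
    expanded = pd.MultiIndex.from_arrays(
        [[1, 1, 2], ["a", "b", "a"], [0, 1, 0]], names=names
    )
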
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/geodataframe.py 
new/geopandas-0.14.4/geopandas/geodataframe.py
--- old/geopandas-0.14.3/geopandas/geodataframe.py      2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/geodataframe.py      2024-04-28 
15:49:10.000000000 +0200
@@ -306,7 +306,10 @@
         if inplace:
             frame = self
         else:
-            frame = self.copy()
+            if compat.PANDAS_GE_30:
+                frame = self.copy(deep=False)
+            else:
+                frame = self.copy()
 
         to_remove = None
         geo_column_name = self._geometry_column_name
@@ -1947,7 +1950,7 @@
         return df
 
     # overrides the pandas astype method to ensure the correct return type
-    def astype(self, dtype, copy=True, errors="raise", **kwargs):
+    def astype(self, dtype, copy=None, errors="raise", **kwargs):
         """
         Cast a pandas object to a specified dtype ``dtype``.
 
@@ -1960,7 +1963,12 @@
         -------
         GeoDataFrame or DataFrame
         """
-        df = super().astype(dtype, copy=copy, errors=errors, **kwargs)
+        if not compat.PANDAS_GE_30 and copy is None:
+            copy = True
+        if copy is not None:
+            kwargs["copy"] = copy
+
+        df = super().astype(dtype, errors=errors, **kwargs)
 
         try:
             geoms = df[self._geometry_column_name]
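
Both hunks above prepare for pandas 3.0 keyword changes: set_geometry takes a cheap shallow copy when Copy-on-Write makes that safe, and astype only forwards copy= when a value was actually supplied. A hedged, standalone sketch of the forwarding pattern (names and the pandas_ge_30 switch are illustrative, not geopandas API):

    import pandas as pd

    def astype_compat(df, dtype, copy=None, errors="raise", pandas_ge_30=False, **kwargs):
        # hypothetical helper mirroring the pattern above
        if not pandas_ge_30 and copy is None:
            copy = True                  # older pandas defaulted to copy=True
        if copy is not None:
            kwargs["copy"] = copy        # only pass the keyword when explicitly set
        return df.astype(dtype, errors=errors, **kwargs)

    out = astype_compat(pd.DataFrame({"a": [1, 2]}), "float64")
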
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/geoseries.py 
new/geopandas-0.14.4/geopandas/geoseries.py
--- old/geopandas-0.14.3/geopandas/geoseries.py 2024-01-31 20:21:34.000000000 
+0100
+++ new/geopandas-0.14.4/geopandas/geoseries.py 2024-04-28 15:49:10.000000000 
+0200
@@ -208,9 +208,16 @@
                         "Non geometry data passed to GeoSeries constructor, "
                         f"received data of dtype '{s.dtype}'"
                     )
-            # try to convert to GeometryArray, if fails return plain Series
+            # extract object-dtype numpy array from pandas Series; with CoW 
this
+            # gives a read-only array, so we try to set the flag back to 
writeable
+            data = s.to_numpy()
             try:
-                data = from_shapely(s.values, crs)
+                data.flags.writeable = True
+            except ValueError:
+                pass
+            # try to convert to GeometryArray
+            try:
+                data = from_shapely(data, crs)
             except TypeError:
                 raise TypeError(
                     "Non geometry data passed to GeoSeries constructor, "
@@ -778,12 +785,10 @@
         """Alias for `notna` method. See `notna` for more detail."""
         return self.notna()
 
-    def fillna(self, value=None, method=None, inplace: bool = False, **kwargs):
+    def fillna(self, value=None, inplace: bool = False, **kwargs):
         """
         Fill NA values with geometry (or geometries).
 
-        ``method`` is currently not implemented.
-
         Parameters
         ----------
         value : shapely geometry or GeoSeries, default None
@@ -852,7 +857,7 @@
         """
         if value is None:
             value = GeometryCollection() if compat.SHAPELY_GE_20 else 
BaseGeometry()
-        return super().fillna(value=value, method=method, inplace=inplace, 
**kwargs)
+        return super().fillna(value=value, inplace=inplace, **kwargs)
 
     def __contains__(self, other) -> bool:
         """Allow tests of the form "geom in s"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/io/file.py 
new/geopandas-0.14.4/geopandas/io/file.py
--- old/geopandas-0.14.3/geopandas/io/file.py   2024-01-31 20:21:34.000000000 
+0100
+++ new/geopandas-0.14.4/geopandas/io/file.py   2024-04-28 15:49:10.000000000 
+0200
@@ -5,6 +5,7 @@
 
 import numpy as np
 import pandas as pd
+from geopandas.io.util import vsi_path
 from pandas.api.types import is_integer_dtype
 
 import pyproj
@@ -55,6 +56,7 @@
             FIONA_GE_19 = Version(Version(fiona.__version__).base_version) >= 
Version(
                 "1.9.0"
             )
+
         except ImportError as err:
             fiona = False
             fiona_import_error = str(err)
@@ -168,16 +170,6 @@
         return False
 
 
-def _is_zip(path):
-    """Check if a given path is a zipfile"""
-    parsed = fiona.path.ParsedPath.from_uri(path)
-    return (
-        parsed.archive.endswith(".zip")
-        if parsed.archive
-        else parsed.path.endswith(".zip")
-    )
-
-
 def _read_file(filename, bbox=None, mask=None, rows=None, engine=None, 
**kwargs):
     """
     Returns a GeoDataFrame from a file or URL.
@@ -312,22 +304,7 @@
         # Opening a file via URL or file-like-object above automatically 
detects a
         # zipped file. In order to match that behavior, attempt to add a zip 
scheme
         # if missing.
-        if _is_zip(str(path_or_bytes)):
-            parsed = fiona.parse_path(str(path_or_bytes))
-            if isinstance(parsed, fiona.path.ParsedPath):
-                # If fiona is able to parse the path, we can safely look at 
the scheme
-                # and update it to have a zip scheme if necessary.
-                schemes = (parsed.scheme or "").split("+")
-                if "zip" not in schemes:
-                    parsed.scheme = "+".join(["zip"] + schemes)
-                path_or_bytes = parsed.name
-            elif isinstance(parsed, fiona.path.UnparsedPath) and not str(
-                path_or_bytes
-            ).startswith("/vsi"):
-                # If fiona is unable to parse the path, it might have a 
Windows drive
-                # scheme. Try adding zip:// to the front. If the path starts 
with "/vsi"
-                # it is a legacy GDAL path type, so let it pass unmodified.
-                path_or_bytes = "zip://" + parsed.name
+        path_or_bytes = vsi_path(str(path_or_bytes))
 
     if from_bytes:
         reader = fiona.BytesCollection
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/io/tests/test_file.py 
new/geopandas-0.14.4/geopandas/io/tests/test_file.py
--- old/geopandas-0.14.3/geopandas/io/tests/test_file.py        2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/io/tests/test_file.py        2024-04-28 
15:49:10.000000000 +0200
@@ -130,7 +130,7 @@
     df = GeoDataFrame.from_file(tempfilename, engine=engine)
     assert "geometry" in df
     assert len(df) == 5
-    assert np.alltrue(df["BoroName"].values == df_nybb["BoroName"])
+    assert np.all(df["BoroName"].values == df_nybb["BoroName"])
 
     # Write layer with null geometry out to file
     tempfilename = os.path.join(str(tmpdir), "null_geom" + ext)
@@ -139,7 +139,7 @@
     df = GeoDataFrame.from_file(tempfilename, engine=engine)
     assert "geometry" in df
     assert len(df) == 2
-    assert np.alltrue(df["Name"].values == df_null["Name"])
+    assert np.all(df["Name"].values == df_null["Name"])
     # check the expected driver
     assert_correct_driver(tempfilename, ext, engine)
 
@@ -153,7 +153,7 @@
     df = GeoDataFrame.from_file(temppath, engine=engine)
     assert "geometry" in df
     assert len(df) == 5
-    assert np.alltrue(df["BoroName"].values == df_nybb["BoroName"])
+    assert np.all(df["BoroName"].values == df_nybb["BoroName"])
     # check the expected driver
     assert_correct_driver(temppath, ext, engine)
 
@@ -1113,7 +1113,7 @@
     # index as string
     df_p = df_points.copy()
     df = GeoDataFrame(df_p["value1"], geometry=df_p.geometry)
-    df.index = pd.TimedeltaIndex(range(len(df)), "days")
+    df.index = pd.to_timedelta(range(len(df)), unit="days")
     # TODO: TimedeltaIndex is an invalid field type
     df.index = df.index.astype(str)
     do_checks(df, index_is_used=True)
@@ -1121,7 +1121,7 @@
     # unnamed DatetimeIndex
     df_p = df_points.copy()
     df = GeoDataFrame(df_p["value1"], geometry=df_p.geometry)
-    df.index = pd.TimedeltaIndex(range(len(df)), "days") + pd.DatetimeIndex(
+    df.index = pd.to_timedelta(range(len(df)), unit="days") + pd.to_datetime(
         ["1999-12-27"] * len(df)
     )
     if driver == "ESRI Shapefile":
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/geopandas-0.14.3/geopandas/io/tests/test_file_geom_types_drivers.py 
new/geopandas-0.14.4/geopandas/io/tests/test_file_geom_types_drivers.py
--- old/geopandas-0.14.3/geopandas/io/tests/test_file_geom_types_drivers.py     
2024-01-31 20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/io/tests/test_file_geom_types_drivers.py     
2024-04-28 15:49:10.000000000 +0200
@@ -244,7 +244,14 @@
     return request.param
 
 
-@pytest.fixture(params=["GeoJSON", "ESRI Shapefile", "GPKG", "SQLite"])
+@pytest.fixture(
+    params=[
+        ("GeoJSON", ".geojson"),
+        ("ESRI Shapefile", ".shp"),
+        ("GPKG", ".gpkg"),
+        ("SQLite", ".sqlite"),
+    ]
+)
 def ogr_driver(request):
     return request.param
 
@@ -260,9 +267,10 @@
 
 
 def test_to_file_roundtrip(tmpdir, geodataframe, ogr_driver, engine):
-    output_file = os.path.join(str(tmpdir), "output_file")
+    driver, ext = ogr_driver
+    output_file = os.path.join(str(tmpdir), "output_file" + ext)
     write_kwargs = {}
-    if ogr_driver == "SQLite":
+    if driver == "SQLite":
         write_kwargs["spatialite"] = True
 
         # This if statement can be removed once minimal fiona version >= 1.8.20
@@ -285,22 +293,20 @@
         ):
             write_kwargs["geometry_type"] = "Point Z"
 
-    expected_error = _expected_error_on(geodataframe, ogr_driver)
+    expected_error = _expected_error_on(geodataframe, driver)
     if expected_error:
         with pytest.raises(
             RuntimeError, match="Failed to write record|Could not add feature 
to layer"
         ):
             geodataframe.to_file(
-                output_file, driver=ogr_driver, engine=engine, **write_kwargs
+                output_file, driver=driver, engine=engine, **write_kwargs
             )
     else:
-        geodataframe.to_file(
-            output_file, driver=ogr_driver, engine=engine, **write_kwargs
-        )
+        geodataframe.to_file(output_file, driver=driver, engine=engine, 
**write_kwargs)
 
         reloaded = geopandas.read_file(output_file, engine=engine)
 
-        if ogr_driver == "GeoJSON" and engine == "pyogrio":
+        if driver == "GeoJSON" and engine == "pyogrio":
             # For GeoJSON files, the int64 column comes back as int32
             reloaded["a"] = reloaded["a"].astype("int64")
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/io/util.py 
new/geopandas-0.14.4/geopandas/io/util.py
--- old/geopandas-0.14.3/geopandas/io/util.py   1970-01-01 01:00:00.000000000 
+0100
+++ new/geopandas-0.14.4/geopandas/io/util.py   2024-04-28 15:49:10.000000000 
+0200
@@ -0,0 +1,118 @@
+"""Vendored, cut down version of pyogrio/util.py for use with fiona"""
+
+import re
+import sys
+from urllib.parse import urlparse
+
+
+def vsi_path(path: str) -> str:
+    """
+    Ensure path is a local path or a GDAL-compatible vsi path.
+
+    """
+
+    # path is already in GDAL format
+    if path.startswith("/vsi"):
+        return path
+
+    # Windows drive letters (e.g. "C:\") confuse `urlparse` as they look like
+    # URL schemes
+    if sys.platform == "win32" and re.match("^[a-zA-Z]\\:", path):
+        if not path.split("!")[0].endswith(".zip"):
+            return path
+
+        # prefix then allow to proceed with remaining parsing
+        path = f"zip://{path}"
+
+    path, archive, scheme = _parse_uri(path)
+
+    if scheme or archive or path.endswith(".zip"):
+        return _construct_vsi_path(path, archive, scheme)
+
+    return path
+
+
+# Supported URI schemes and their mapping to GDAL's VSI suffix.
+SCHEMES = {
+    "file": "file",
+    "zip": "zip",
+    "tar": "tar",
+    "gzip": "gzip",
+    "http": "curl",
+    "https": "curl",
+    "ftp": "curl",
+    "s3": "s3",
+    "gs": "gs",
+    "az": "az",
+    "adls": "adls",
+    "adl": "adls",  # fsspec uses this
+    "hdfs": "hdfs",
+    "webhdfs": "webhdfs",
+    # GDAL additionally supports oss and swift for remote filesystems, but
+    # those are for now not added as supported URI
+}
+
+CURLSCHEMES = {k for k, v in SCHEMES.items() if v == "curl"}
+
+
+def _parse_uri(path: str):
+    """
+    Parse a URI
+
+    Returns a tuples of (path, archive, scheme)
+
+    path : str
+        Parsed path. Includes the hostname and query string in the case
+        of a URI.
+    archive : str
+        Parsed archive path.
+    scheme : str
+        URI scheme such as "https" or "zip+s3".
+    """
+    parts = urlparse(path)
+
+    # if the scheme is not one of GDAL's supported schemes, return raw path
+    if parts.scheme and not all(p in SCHEMES for p in parts.scheme.split("+")):
+        return path, "", ""
+
+    # we have a URI
+    path = parts.path
+    scheme = parts.scheme or ""
+
+    if parts.query:
+        path += "?" + parts.query
+
+    if parts.scheme and parts.netloc:
+        path = parts.netloc + path
+
+    parts = path.split("!")
+    path = parts.pop() if parts else ""
+    archive = parts.pop() if parts else ""
+    return (path, archive, scheme)
+
+
+def _construct_vsi_path(path, archive, scheme) -> str:
+    """Convert a parsed path to a GDAL VSI path"""
+
+    prefix = ""
+    suffix = ""
+    schemes = scheme.split("+")
+
+    if "zip" not in schemes and (archive.endswith(".zip") or 
path.endswith(".zip")):
+        schemes.insert(0, "zip")
+
+    if schemes:
+        prefix = "/".join(
+            "vsi{0}".format(SCHEMES[p]) for p in schemes if p and p != "file"
+        )
+
+        if schemes[-1] in CURLSCHEMES:
+            suffix = f"{schemes[-1]}://"
+
+    if prefix:
+        if archive:
+            return "/{}/{}{}/{}".format(prefix, suffix, archive, 
path.lstrip("/"))
+        else:
+            return "/{}/{}{}".format(prefix, suffix, path)
+
+    return path
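
A few usage examples for the vendored helper, with the expected results inferred from the code above rather than from upstream documentation (all paths are made up):

    from geopandas.io.util import vsi_path

    vsi_path("data/nybb.shp")                      # -> "data/nybb.shp" (plain local path, unchanged)
    vsi_path("data/nybb.zip")                      # -> "/vsizip/data/nybb.zip"
    vsi_path("zip://archive.zip!layers/nybb.shp")  # -> "/vsizip/archive.zip/layers/nybb.shp"
    vsi_path("https://example.com/nybb.gpkg")      # -> "/vsicurl/https://example.com/nybb.gpkg"
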
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/tests/test_array.py 
new/geopandas-0.14.4/geopandas/tests/test_array.py
--- old/geopandas-0.14.3/geopandas/tests/test_array.py  2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tests/test_array.py  2024-04-28 
15:49:10.000000000 +0200
@@ -281,6 +281,9 @@
         ("geom_almost_equals", (3,)),
     ],
 )
+# filters required for attr=geom_almost_equals only
+@pytest.mark.filterwarnings(r"ignore:The \'geom_almost_equals\(\)\' method is 
deprecat")
+@pytest.mark.filterwarnings(r"ignore:The \'almost_equals\(\)\' method is 
deprecated")
 def test_predicates_vector_scalar(attr, args):
     na_value = False
 
@@ -320,6 +323,9 @@
         ("geom_almost_equals", (3,)),
     ],
 )
+# filters required for attr=geom_almost_equals only
+@pytest.mark.filterwarnings(r"ignore:The \'geom_almost_equals\(\)\' method is 
deprecat")
+@pytest.mark.filterwarnings(r"ignore:The \'almost_equals\(\)\' method is 
deprecated")
 def test_predicates_vector_vector(attr, args):
     na_value = False
     empty_value = True if attr == "disjoint" else False
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/tests/test_crs.py 
new/geopandas-0.14.4/geopandas/tests/test_crs.py
--- old/geopandas-0.14.3/geopandas/tests/test_crs.py    2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tests/test_crs.py    2024-04-28 
15:49:10.000000000 +0200
@@ -1,4 +1,5 @@
 import random
+import warnings
 
 import numpy as np
 import pandas as pd
@@ -82,6 +83,9 @@
     assert result.has_z.all()
 
 
+# pyproj + numpy 1.25 trigger warning for single-element array -> recommdation 
is to
+# ignore the warning for now (https://github.com/pyproj4/pyproj/issues/1307)
+@pytest.mark.filterwarnings("ignore:Conversion of an array 
with:DeprecationWarning")
 def test_to_crs_dimension_mixed():
     s = GeoSeries([Point(1, 2), LineString([(1, 2, 3), (4, 5, 6)])], crs=2056)
     result = s.to_crs(epsg=4326)
@@ -150,6 +154,9 @@
     assert_geodataframe_equal(df, utm, check_less_precise=True, 
check_crs=False)
 
 
+# pyproj + numpy 1.25 trigger warning for single-element array -> recommdation 
is to
+# ignore the warning for now (https://github.com/pyproj4/pyproj/issues/1307)
+@pytest.mark.filterwarnings("ignore:Conversion of an array 
with:DeprecationWarning")
 def test_crs_axis_order__always_xy():
     df = GeoDataFrame(geometry=[Point(-1683723, 6689139)], crs="epsg:26918")
     lonlat = df.to_crs("epsg:4326")
@@ -319,7 +326,11 @@
             df.crs = 27700
 
         # geometry column without geometry
-        df = GeoDataFrame({"geometry": [Point(0, 1)]}).assign(geometry=[0])
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore", "Geometry column does not contain geometry", 
UserWarning
+            )
+            df = GeoDataFrame({"geometry": [Point(0, 1)]}).assign(geometry=[0])
         with pytest.raises(
             ValueError,
             match="Assigning CRS to a GeoDataFrame without an active geometry",
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/tests/test_dissolve.py 
new/geopandas-0.14.4/geopandas/tests/test_dissolve.py
--- old/geopandas-0.14.3/geopandas/tests/test_dissolve.py       2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tests/test_dissolve.py       2024-04-28 
15:49:10.000000000 +0200
@@ -95,7 +95,7 @@
         )
         # for non pandas "mean", numeric only cannot be applied. Drop columns 
manually
         test2 = nybb_polydf.drop(columns=["BoroName"]).dissolve(
-            "manhattan_bronx", aggfunc=np.mean
+            "manhattan_bronx", aggfunc="mean"
         )
 
     assert_frame_equal(expected_mean, test, check_column_type=False)
@@ -261,6 +261,7 @@
 
     # when observed=False we get an additional observation
     # that wasn't in the original data
+    none_val = None
     expected_gdf_observed_false = geopandas.GeoDataFrame(
         {
             "cat": pd.Categorical(["a", "a", "b", "b"]),
@@ -268,7 +269,7 @@
             "geometry": geopandas.array.from_wkt(
                 [
                     "MULTIPOINT (0 0, 1 1)",
-                    None,
+                    none_val,
                     "POINT (2 2)",
                     "POINT (3 3)",
                 ]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/geopandas-0.14.3/geopandas/tests/test_extension_array.py 
new/geopandas-0.14.4/geopandas/tests/test_extension_array.py
--- old/geopandas-0.14.3/geopandas/tests/test_extension_array.py        
2024-01-31 20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tests/test_extension_array.py        
2024-04-28 15:49:10.000000000 +0200
@@ -13,12 +13,14 @@
 expected to be available to pytest by the inherited pandas tests).
 
 """
+
+import itertools
 import operator
 
 import numpy as np
 from numpy.testing import assert_array_equal
 import pandas as pd
-from pandas.testing import assert_series_equal
+from pandas.testing import assert_series_equal, assert_frame_equal
 from pandas.tests.extension import base as extension_tests
 
 import shapely.geometry
@@ -357,7 +359,73 @@
 
 
 class TestReshaping(extension_tests.BaseReshapingTests):
-    pass
+    # NOTE: this test is copied from pandas/tests/extension/base/reshaping.py
+    # because starting with pandas 3.0 the assert_frame_equal is strict 
regarding
+    # the exact missing value (None vs NaN)
+    # Our `result` uses None, but the way the `expected` is created results in
+    # NaNs (and specifying to use None as fill value in unstack also does not
+    # help)
+    # -> the only change compared to the upstream test is marked
+    @pytest.mark.parametrize(
+        "index",
+        [
+            # Two levels, uniform.
+            pd.MultiIndex.from_product(([["A", "B"], ["a", "b"]]), names=["a", 
"b"]),
+            # non-uniform
+            pd.MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "b")]),
+            # three levels, non-uniform
+            pd.MultiIndex.from_product([("A", "B"), ("a", "b", "c"), (0, 1, 
2)]),
+            pd.MultiIndex.from_tuples(
+                [
+                    ("A", "a", 1),
+                    ("A", "b", 0),
+                    ("A", "a", 0),
+                    ("B", "a", 0),
+                    ("B", "c", 1),
+                ]
+            ),
+        ],
+    )
+    @pytest.mark.parametrize("obj", ["series", "frame"])
+    def test_unstack(self, data, index, obj):
+        data = data[: len(index)]
+        if obj == "series":
+            ser = pd.Series(data, index=index)
+        else:
+            ser = pd.DataFrame({"A": data, "B": data}, index=index)
+
+        n = index.nlevels
+        levels = list(range(n))
+        # [0, 1, 2]
+        # [(0,), (1,), (2,), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
+        combinations = itertools.chain.from_iterable(
+            itertools.permutations(levels, i) for i in range(1, n)
+        )
+
+        for level in combinations:
+            result = ser.unstack(level=level)
+            assert all(
+                isinstance(result[col].array, type(data)) for col in 
result.columns
+            )
+
+            if obj == "series":
+                # We should get the same result with to_frame+unstack+droplevel
+                df = ser.to_frame()
+
+                alt = df.unstack(level=level).droplevel(0, axis=1)
+                assert_frame_equal(result, alt)
+
+            obj_ser = ser.astype(object)
+
+            expected = obj_ser.unstack(level=level, 
fill_value=data.dtype.na_value)
+            if obj == "series":
+                assert (expected.dtypes == object).all()
+            # <------------ next line is added
+            expected[expected.isna()] = None
+            # ------------->
+
+            result = result.astype(object)
+            assert_frame_equal(result, expected)
 
 
 class TestGetitem(extension_tests.BaseGetitemTests):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/geopandas-0.14.3/geopandas/tests/test_geodataframe.py 
new/geopandas-0.14.4/geopandas/tests/test_geodataframe.py
--- old/geopandas-0.14.3/geopandas/tests/test_geodataframe.py   2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tests/test_geodataframe.py   2024-04-28 
15:49:10.000000000 +0200
@@ -997,6 +997,7 @@
             " test sjoin_nearest"
         ),
     )
+    @pytest.mark.filterwarnings("ignore:Geometry is in a geographic 
CRS:UserWarning")
     def test_sjoin_nearest(self, how, max_distance, distance_col):
         """
         Basic test for availability of the GeoDataFrame method. Other
@@ -1329,7 +1330,8 @@
         ):
             gdf5["geometry"] = "foo"
         assert gdf5._geometry_column_name is None
-        gdf3 = gdf.copy().assign(geometry=geo_col)
+        with pytest.warns(FutureWarning, match=match):
+            gdf3 = gdf.copy().assign(geometry=geo_col)
         assert gdf3._geometry_column_name == "geometry"
 
         # Check that adding a GeoSeries to a column called "geometry" to a
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/geopandas-0.14.3/geopandas/tests/test_geom_methods.py 
new/geopandas-0.14.4/geopandas/tests/test_geom_methods.py
--- old/geopandas-0.14.3/geopandas/tests/test_geom_methods.py   2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tests/test_geom_methods.py   2024-04-28 
15:49:10.000000000 +0200
@@ -1298,7 +1298,8 @@
             [
                 "POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))",
                 "POLYGON ((2 0, 2 3, 3 3, 3 0, 2 0))",
-            ]
+            ],
+            crs=3857,
         )
 
         assert np.all(r.normalize().geom_equals_exact(exp, 0.001))
@@ -1333,7 +1334,7 @@
     def test_minimum_bounding_circle(self):
         mbc = self.g1.minimum_bounding_circle()
         centers = GeoSeries([Point(0.5, 0.5)] * 2)
-        assert np.all(mbc.centroid.geom_almost_equals(centers, 0.001))
+        assert np.all(mbc.centroid.geom_equals_exact(centers, 0.001))
         assert_series_equal(
             mbc.area,
             Series([1.560723, 1.560723]),
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/tests/test_geoseries.py 
new/geopandas-0.14.4/geopandas/tests/test_geoseries.py
--- old/geopandas-0.14.3/geopandas/tests/test_geoseries.py      2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tests/test_geoseries.py      2024-04-28 
15:49:10.000000000 +0200
@@ -1,7 +1,6 @@
 import json
 import os
 import random
-import re
 import shutil
 import tempfile
 import warnings
@@ -145,27 +144,37 @@
         exp = pd.Series([False, False], index=["A", "B"])
         assert_series_equal(a, exp)
 
+    @pytest.mark.filterwarnings(r"ignore:The 
'geom_almost_equals\(\)':FutureWarning")
     def test_geom_almost_equals(self):
         # TODO: test decimal parameter
-        with pytest.warns(FutureWarning, match=re.escape("The 
'geom_almost_equals()'")):
-            assert np.all(self.g1.geom_almost_equals(self.g1))
-            assert_array_equal(self.g1.geom_almost_equals(self.sq), [False, 
True])
-
-            assert_array_equal(
-                self.a1.geom_almost_equals(self.a2, align=True), [False, True, 
False]
+        assert np.all(self.g1.geom_almost_equals(self.g1))
+        assert_array_equal(self.g1.geom_almost_equals(self.sq), [False, True])
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore",
+                "The indices of the two GeoSeries are different",
+                UserWarning,
             )
             assert_array_equal(
-                self.a1.geom_almost_equals(self.a2, align=False), [False, 
False]
+                self.a1.geom_almost_equals(self.a2, align=True),
+                [False, True, False],
             )
+        assert_array_equal(
+            self.a1.geom_almost_equals(self.a2, align=False), [False, False]
+        )
 
     def test_geom_equals_exact(self):
         # TODO: test tolerance parameter
         assert np.all(self.g1.geom_equals_exact(self.g1, 0.001))
         assert_array_equal(self.g1.geom_equals_exact(self.sq, 0.001), [False, 
True])
-
-        assert_array_equal(
-            self.a1.geom_equals_exact(self.a2, 0.001, align=True), [False, 
True, False]
-        )
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore", "The indices of the two GeoSeries are different", 
UserWarning
+            )
+            assert_array_equal(
+                self.a1.geom_equals_exact(self.a2, 0.001, align=True),
+                [False, True, False],
+            )
         assert_array_equal(
             self.a1.geom_equals_exact(self.a2, 0.001, align=False), [False, 
False]
         )
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/tests/test_overlay.py 
new/geopandas-0.14.4/geopandas/tests/test_overlay.py
--- old/geopandas-0.14.3/geopandas/tests/test_overlay.py        2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tests/test_overlay.py        2024-04-28 
15:49:10.000000000 +0200
@@ -212,6 +212,10 @@
         expected.loc[24, "geometry"] = None
         result.loc[24, "geometry"] = None
 
+    # missing values get read as None in read_file for a string column, but
+    # are introduced as NaN by overlay
+    expected["BoroName"] = expected["BoroName"].fillna(np.nan)
+
     assert_geodataframe_equal(
         result,
         expected,
@@ -514,6 +518,12 @@
         expected = expected.sort_values(cols, axis=0).reset_index(drop=True)
         result = result.sort_values(cols, axis=0).reset_index(drop=True)
 
+        # some columns are all-NaN in the result, but get read as object dtype
+        # column of None values in read_file
+        for col in ["col1", "col3", "col4"]:
+            if col in expected.columns and expected[col].isna().all():
+                expected[col] = expected[col].astype("float64")
+
         assert_geodataframe_equal(
             result,
             expected,
@@ -854,7 +864,7 @@
 
     def test_intersection(self):
         df_result = overlay(self.layer_a, self.layer_b, how="intersection")
-        assert df_result.geom_equals(self.intersection).bool()
+        assert df_result.geom_equals(self.intersection).all()
 
     def test_union(self):
         df_result = overlay(self.layer_a, self.layer_b, how="union")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/geopandas-0.14.3/geopandas/tests/test_pandas_methods.py 
new/geopandas-0.14.4/geopandas/tests/test_pandas_methods.py
--- old/geopandas-0.14.3/geopandas/tests/test_pandas_methods.py 2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tests/test_pandas_methods.py 2024-04-28 
15:49:10.000000000 +0200
@@ -754,6 +754,7 @@
     np.testing.assert_allclose(result, expected)
 
 
+@pytest.mark.skipif(compat.PANDAS_GE_30, reason="convert_dtype is removed in 
pandas 3")
 def test_apply_convert_dtypes_keyword(s):
     # ensure the convert_dtypes keyword is accepted
     if not compat.PANDAS_GE_21:
@@ -879,3 +880,20 @@
 
     with pytest.raises(ValueError):
         pd.concat([df, df])
+
+
+@pytest.mark.skipif(
+    not compat.SHAPELY_GE_20, reason="ufunc only exists in shapely >= 2"
+)
+def test_ufunc():
+    # this is calling a shapely ufunc, but we currently rely on pandas' 
implementation
+    # of `__array_ufunc__` to wrap the result back into a GeoSeries
+    ser = GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)])
+    result = shapely.buffer(ser, 2)
+    assert isinstance(result, GeoSeries)
+
+    # ensure the result is still writeable
+    # (https://github.com/geopandas/geopandas/issues/3178)
+    assert result.array._data.flags.writeable
+    result.loc[0] = Point(10, 10)
+    assert result.iloc[0] == Point(10, 10)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/tests/test_plotting.py 
new/geopandas-0.14.4/geopandas/tests/test_plotting.py
--- old/geopandas-0.14.3/geopandas/tests/test_plotting.py       2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tests/test_plotting.py       2024-04-28 
15:49:10.000000000 +0200
@@ -324,7 +324,14 @@
 
         gdf = GeoDataFrame(geometry=[point, empty_point, point_])
         gdf["geometry"] = gdf.intersection(poly)
-        gdf.loc[3] = [None]
+        with warnings.catch_warnings():
+            # loc to add row calls concat internally, warning for pandas >=2.1
+            warnings.filterwarnings(
+                "ignore",
+                "The behavior of DataFrame concatenation with empty",
+                FutureWarning,
+            )
+            gdf.loc[3] = [None]
         ax = gdf.plot()
         assert len(ax.collections) == 1
 
@@ -1133,6 +1140,9 @@
         assert ax3.get_aspect() == 0.5
 
 
+@pytest.mark.filterwarnings(
+    "ignore:Numba not installed. Using slow pure python version.:UserWarning"
+)
 class TestMapclassifyPlotting:
     @classmethod
     def setup_class(cls):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/tools/overlay.py 
new/geopandas-0.14.4/geopandas/tools/overlay.py
--- old/geopandas-0.14.3/geopandas/tools/overlay.py     2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tools/overlay.py     2024-04-28 
15:49:10.000000000 +0200
@@ -6,6 +6,7 @@
 
 from geopandas import GeoDataFrame, GeoSeries
 from geopandas.array import _check_crs, _crs_mismatch_warn
+from geopandas._compat import PANDAS_GE_30
 
 
 def _ensure_geometry_column(df):
@@ -14,12 +15,15 @@
     If another column with that name exists, it will be dropped.
     """
     if not df._geometry_column_name == "geometry":
-        if "geometry" in df.columns:
-            df.drop("geometry", axis=1, inplace=True)
-        df.rename(
-            columns={df._geometry_column_name: "geometry"}, copy=False, 
inplace=True
-        )
-        df.set_geometry("geometry", inplace=True)
+        if PANDAS_GE_30:
+            if "geometry" in df.columns:
+                df = df.drop("geometry", axis=1)
+            df = df.rename_geometry("geometry")
+        else:
+            if "geometry" in df.columns:
+                df.drop("geometry", axis=1, inplace=True)
+            df.rename_geometry("geometry", inplace=True)
+    return df
 
 
 def _overlay_intersection(df1, df2):
@@ -112,8 +116,8 @@
     dfdiff1["__idx2"] = np.nan
     dfdiff2["__idx1"] = np.nan
     # ensure geometry name (otherwise merge goes wrong)
-    _ensure_geometry_column(dfdiff1)
-    _ensure_geometry_column(dfdiff2)
+    dfdiff1 = _ensure_geometry_column(dfdiff1)
+    dfdiff2 = _ensure_geometry_column(dfdiff2)
     # combine both 'difference' dataframes
     dfsym = dfdiff1.merge(
         dfdiff2, on=["__idx1", "__idx2"], how="outer", suffixes=("_1", "_2")
@@ -136,7 +140,14 @@
     """
     dfinter = _overlay_intersection(df1, df2)
     dfsym = _overlay_symmetric_diff(df1, df2)
-    dfunion = pd.concat([dfinter, dfsym], ignore_index=True, sort=False)
+    with warnings.catch_warnings():
+        # pandas GH52532 FutureWarning, fix new behaviour if needed when it is 
added
+        warnings.filterwarnings(
+            "ignore",
+            "The behavior of DataFrame concatenation with empty",
+            FutureWarning,
+        )
+        dfunion = pd.concat([dfinter, dfsym], ignore_index=True, sort=False)
     # keep geometry column last
     columns = list(dfunion.columns)
     columns.remove("geometry")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/geopandas-0.14.3/geopandas/tools/tests/test_random.py 
new/geopandas-0.14.4/geopandas/tools/tests/test_random.py
--- old/geopandas-0.14.3/geopandas/tools/tests/test_random.py   2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tools/tests/test_random.py   2024-04-28 
15:49:10.000000000 +0200
@@ -22,7 +22,9 @@
 )
 def test_uniform(geom, size):
     sample = uniform(geom, size=size, rng=1)
-    sample_series = 
geopandas.GeoSeries(sample).explode().reset_index(drop=True)
+    sample_series = (
+        
geopandas.GeoSeries(sample).explode(index_parts=True).reset_index(drop=True)
+    )
     assert len(sample_series) == size
     sample_in_geom = sample_series.buffer(0.00000001).sindex.query(
         geom, predicate="intersects"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas/tools/tests/test_sjoin.py 
new/geopandas-0.14.4/geopandas/tools/tests/test_sjoin.py
--- old/geopandas-0.14.3/geopandas/tools/tests/test_sjoin.py    2024-01-31 
20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/geopandas/tools/tests/test_sjoin.py    2024-04-28 
15:49:10.000000000 +0200
@@ -527,7 +527,7 @@
 
     def test_sjoin_empty_geometries(self):
         # https://github.com/geopandas/geopandas/issues/944
-        empty = GeoDataFrame(geometry=[GeometryCollection()] * 3)
+        empty = GeoDataFrame(geometry=[GeometryCollection()] * 3, crs=self.crs)
         df = sjoin(pd.concat([self.pointdf, empty]), self.polydf, how="left")
         assert df.shape == (24, 8)
         df2 = sjoin(self.pointdf, pd.concat([self.polydf, empty]), how="left")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas.egg-info/PKG-INFO 
new/geopandas-0.14.4/geopandas.egg-info/PKG-INFO
--- old/geopandas-0.14.3/geopandas.egg-info/PKG-INFO    2024-01-31 
20:21:48.000000000 +0100
+++ new/geopandas-0.14.4/geopandas.egg-info/PKG-INFO    2024-04-28 
15:49:18.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: geopandas
-Version: 0.14.3
+Version: 0.14.4
 Summary: Geographic pandas extensions
 Author-email: Kelsey Jordahl <kjord...@alum.mit.edu>
 Maintainer: GeoPandas contributors
@@ -19,6 +19,7 @@
 Description-Content-Type: text/x-rst
 License-File: LICENSE.txt
 Requires-Dist: fiona>=1.8.21
+Requires-Dist: numpy>=1.22
 Requires-Dist: packaging
 Requires-Dist: pandas>=1.4.0
 Requires-Dist: pyproj>=3.3.0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas.egg-info/SOURCES.txt 
new/geopandas-0.14.4/geopandas.egg-info/SOURCES.txt
--- old/geopandas-0.14.3/geopandas.egg-info/SOURCES.txt 2024-01-31 
20:21:48.000000000 +0100
+++ new/geopandas-0.14.4/geopandas.egg-info/SOURCES.txt 2024-04-28 
15:49:18.000000000 +0200
@@ -45,6 +45,7 @@
 geopandas/io/arrow.py
 geopandas/io/file.py
 geopandas/io/sql.py
+geopandas/io/util.py
 geopandas/io/tests/__init__.py
 geopandas/io/tests/generate_legacy_storage_files.py
 geopandas/io/tests/test_arrow.py
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/geopandas.egg-info/requires.txt 
new/geopandas-0.14.4/geopandas.egg-info/requires.txt
--- old/geopandas-0.14.3/geopandas.egg-info/requires.txt        2024-01-31 
20:21:48.000000000 +0100
+++ new/geopandas-0.14.4/geopandas.egg-info/requires.txt        2024-04-28 
15:49:18.000000000 +0200
@@ -1,4 +1,5 @@
 fiona>=1.8.21
+numpy>=1.22
 packaging
 pandas>=1.4.0
 pyproj>=3.3.0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/geopandas-0.14.3/pyproject.toml 
new/geopandas-0.14.4/pyproject.toml
--- old/geopandas-0.14.3/pyproject.toml 2024-01-31 20:21:34.000000000 +0100
+++ new/geopandas-0.14.4/pyproject.toml 2024-04-28 15:49:10.000000000 +0200
@@ -22,6 +22,7 @@
 requires-python = ">=3.9"
 dependencies = [
     "fiona >= 1.8.21",
+    "numpy >= 1.22",
     "packaging",
     "pandas >= 1.4.0",
     "pyproj >= 3.3.0",
