Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-s3fs for openSUSE:Factory 
checked in at 2021-08-19 10:00:50
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-s3fs (Old)
 and      /work/SRC/openSUSE:Factory/.python-s3fs.new.1899 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-s3fs"

Thu Aug 19 10:00:50 2021 rev:9 rq:912386 version:2021.7.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-s3fs/python-s3fs.changes  2021-05-21 21:50:00.434235654 +0200
+++ /work/SRC/openSUSE:Factory/.python-s3fs.new.1899/python-s3fs.changes        2021-08-19 10:01:50.471203960 +0200
@@ -1,0 +2,14 @@
+Fri Aug 13 23:41:25 UTC 2021 - Ben Greiner <c...@bnavigator.de>
+
+- Update to 2021.07.0
+  * make bucket in put(recursive) (#496)
+  * non-truthy prefixes (#497)
+  * implement rm_file (#499)
+- Release 2021.06.1
+  * bucket region caching (#495)
+- Release 2021.06.0
+  * support "prefix" in directory listings (#486)
+  * support negative index in cat_file (#487, 488)
+  * don't require ETag in file details (#480)
+
+-------------------------------------------------------------------
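
The entries above are user-visible API additions. A minimal sketch of how they are exercised, assuming an existing, readable and writable bucket named "mybucket" (the bucket and key names are placeholders, not part of the package):

    import s3fs

    fs = s3fs.S3FileSystem()

    # rm_file (#499): delete a single object without a recursive rm
    fs.pipe("mybucket/tmp/junk", b"data")
    fs.rm_file("mybucket/tmp/junk")

    # "prefix" in directory listings / find (#486): only keys under the path
    # that start with the given prefix are returned
    logs = fs.find("mybucket/logs", prefix="2021-07-")

    # negative indices in cat_file (#487, #488): byte ranges counted from the end
    tail = fs.cat_file("mybucket/logs/app.log", start=-1024)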

Old:
----
  s3fs-2021.5.0.tar.gz

New:
----
  s3fs-2021.7.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-s3fs.spec ++++++
--- /var/tmp/diff_new_pack.js8wF7/_old  2021-08-19 10:01:50.927203405 +0200
+++ /var/tmp/diff_new_pack.js8wF7/_new  2021-08-19 10:01:50.931203401 +0200
@@ -19,7 +19,7 @@
 %{?!python_module:%define python_module() python3-%{**}}
 %define skip_python2 1
 Name:           python-s3fs
-Version:        2021.5.0
+Version:        2021.7.0
 Release:        0
 Summary:        Python filesystem interface for S3
 License:        BSD-3-Clause
@@ -28,7 +28,7 @@
 BuildRequires:  %{python_module Flask}
 BuildRequires:  %{python_module aiobotocore >= 1.0.1}
 BuildRequires:  %{python_module boto3}
-BuildRequires:  %{python_module fsspec >= 2021.5.0}
+BuildRequires:  %{python_module fsspec >= 2021.7.0}
 BuildRequires:  %{python_module moto-server >= 2.0}
 BuildRequires:  %{python_module pytest >= 4.2.0}
 BuildRequires:  %{python_module pytest-env}
@@ -39,7 +39,7 @@
 BuildRequires:  python-mock
 %endif
 Requires:       python-aiobotocore >= 1.0.1
-Requires:       python-fsspec >= 2021.5.0
+Requires:       python-fsspec >= 2021.7.0
 Recommends:     aws-cli
 Recommends:     python-boto3
 BuildArch:      noarch

++++++ s3fs-2021.5.0.tar.gz -> s3fs-2021.7.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-2021.5.0/PKG-INFO new/s3fs-2021.7.0/PKG-INFO
--- old/s3fs-2021.5.0/PKG-INFO  2021-05-14 17:01:12.000000000 +0200
+++ new/s3fs-2021.7.0/PKG-INFO  2021-07-13 15:59:25.690399600 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: s3fs
-Version: 2021.5.0
+Version: 2021.7.0
 Summary: Convenient Filesystem interface over S3
 Home-page: http://github.com/dask/s3fs/
 Maintainer: Martin Durant
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-2021.5.0/docs/source/api.rst new/s3fs-2021.7.0/docs/source/api.rst
--- old/s3fs-2021.5.0/docs/source/api.rst       2020-08-10 22:49:16.000000000 +0200
+++ new/s3fs-2021.7.0/docs/source/api.rst       2021-06-07 20:41:14.000000000 +0200
@@ -8,6 +8,7 @@
    S3FileSystem.cat
    S3FileSystem.du
    S3FileSystem.exists
+   S3FileSystem.find
    S3FileSystem.get
    S3FileSystem.glob
    S3FileSystem.info
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-2021.5.0/docs/source/changelog.rst new/s3fs-2021.7.0/docs/source/changelog.rst
--- old/s3fs-2021.5.0/docs/source/changelog.rst 2021-05-14 17:00:22.000000000 +0200
+++ new/s3fs-2021.7.0/docs/source/changelog.rst 2021-07-13 15:58:39.000000000 +0200
@@ -1,15 +1,31 @@
 Changelog
 =========
 
-Dev
----
+2021.07.0
+---------
+
+- make bucket in put(recursive) (#496)
+- non-truthy prefixes (#497)
+- implement rm_file (#499)
+
+2021.06.1
+---------
+
+- bucket region caching (#495)
+
+2021.06.0
+---------
+
+- support "prefix" in directory listings (#486)
+- support negative index in cat_file (#487, 488)
+- don't require ETag in file details (#480)
 
 2021.05.0
 ---------
 
 - optimize ``info``,``exists`` (and related) calls for non-version aware mode
 - copy with entries without ETag (#480)
-- find not to corrups parent listing (#476)
+- find not to corrupts parent listing (#476)
 - short listing to determine directory (#472, 471)
 
 Version 2021.04.0
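
The "bucket region caching (#495)" entry above surfaces as the new cache_regions constructor option shown in the core.py diff below; per that diff, the option is disabled when an explicit region_name or endpoint_url is supplied. A hedged usage sketch (the bucket names are placeholders):

    import s3fs

    fs = s3fs.S3FileSystem(cache_regions=True)
    fs.ls("bucket-in-us-east-1")   # first access resolves and caches the bucket's region
    fs.ls("bucket-in-eu-west-1")   # served by a separate, cached regional client

    # Drop the cached per-bucket clients, e.g. after a bucket was recreated in another region
    fs.invalidate_region_cache()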
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-2021.5.0/requirements.txt new/s3fs-2021.7.0/requirements.txt
--- old/s3fs-2021.5.0/requirements.txt  2021-05-14 16:58:47.000000000 +0200
+++ new/s3fs-2021.7.0/requirements.txt  2021-07-13 15:58:39.000000000 +0200
@@ -1,2 +1,2 @@
 aiobotocore>=1.0.1
-fsspec==2021.05.0
+fsspec==2021.07.0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-2021.5.0/s3fs/_version.py new/s3fs-2021.7.0/s3fs/_version.py
--- old/s3fs-2021.5.0/s3fs/_version.py  2021-05-14 17:01:12.000000000 +0200
+++ new/s3fs-2021.7.0/s3fs/_version.py  2021-07-13 15:59:25.691342800 +0200
@@ -8,11 +8,11 @@
 
 version_json = '''
 {
- "date": "2021-05-14T11:00:34-0400",
+ "date": "2021-07-13T09:58:32-0400",
  "dirty": false,
  "error": null,
- "full-revisionid": "a3d7a946f85b6dbef62ab75c61fe1319a482c8ba",
- "version": "2021.05.0"
+ "full-revisionid": "562ed04dcc508263541a5f94fe090ef8efc1372e",
+ "version": "2021.07.0"
 }
 '''  # END VERSION_JSON
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-2021.5.0/s3fs/core.py new/s3fs-2021.7.0/s3fs/core.py
--- old/s3fs-2021.5.0/s3fs/core.py      2021-05-14 16:58:37.000000000 +0200
+++ new/s3fs-2021.7.0/s3fs/core.py      2021-07-13 15:49:26.000000000 +0200
@@ -9,7 +9,7 @@
 
 from fsspec.spec import AbstractBufferedFile
 from fsspec.utils import infer_storage_options, tokenize, setup_logging as setup_logger
-from fsspec.asyn import AsyncFileSystem, sync, sync_wrapper
+from fsspec.asyn import AsyncFileSystem, sync, sync_wrapper, FSTimeoutError
 
 import aiobotocore
 import botocore
@@ -18,7 +18,7 @@
 from botocore.exceptions import ClientError, ParamValidationError
 
 from s3fs.errors import translate_boto_error
-from s3fs.utils import ParamKwargsHelper, _get_brange, FileExpired
+from s3fs.utils import S3BucketRegionCache, ParamKwargsHelper, _get_brange, FileExpired
 
 
 logger = logging.getLogger("s3fs")
@@ -125,6 +125,10 @@
         Whether to support bucket versioning.  If enable this will require the
         user to have the necessary IAM permissions for dealing with versioned
         objects.
+    cache_regions : bool (False)
+        Whether to cache bucket regions or not. Whenever a new bucket is used,
+        it will first find out which region it belongs and then use the client
+        for that region.
     config_kwargs : dict of parameters passed to ``botocore.client.Config``
     kwargs : other parameters for core session
     session : aiobotocore AioSession object to be used for all connections.
@@ -173,6 +177,7 @@
         session=None,
         username=None,
         password=None,
+        cache_regions=False,
         asynchronous=False,
         loop=None,
         **kwargs
@@ -207,6 +212,7 @@
         self.req_kw = {"RequestPayer": "requester"} if requester_pays else {}
         self.s3_additional_kwargs = s3_additional_kwargs or {}
         self.use_ssl = use_ssl
+        self.cache_regions = cache_regions
         self._s3 = None
         self.session = None
 
@@ -221,9 +227,16 @@
     def _filter_kwargs(self, s3_method, kwargs):
         return self._kwargs_helper.filter_dict(s3_method.__name__, kwargs)
 
+    async def get_s3(self, bucket=None):
+        if self.cache_regions and bucket is not None:
+            return await self._s3creator.get_bucket_client(bucket)
+        else:
+            return self._s3
+
     async def _call_s3(self, method, *akwarglist, **kwargs):
         await self.set_session()
-        method = getattr(self.s3, method)
+        s3 = await self.get_s3(kwargs.get("Bucket"))
+        method = getattr(s3, method)
         kw2 = kwargs.copy()
         kw2.pop("Body", None)
         logger.debug("CALL: %s - %s - %s", method.__name__, akwarglist, kw2)
@@ -360,13 +373,36 @@
                 if key not in drop_keys
             }
             config_kwargs["signature_version"] = UNSIGNED
+
         conf = AioConfig(**config_kwargs)
         self.session = aiobotocore.AioSession(**self.kwargs)
-        s3creator = self.session.create_client(
-            "s3", config=conf, **init_kwargs, **client_kwargs
-        )
+
+        for parameters in (config_kwargs, self.kwargs, init_kwargs, client_kwargs):
+            for option in ("region_name", "endpoint_url"):
+                if parameters.get(option):
+                    self.cache_regions = False
+                    break
+        else:
+            cache_regions = self.cache_regions
+
+        logger.debug(
+            "RC: caching enabled? %r (explicit option is %r)",
+            cache_regions,
+            self.cache_regions,
+        )
+        self.cache_regions = cache_regions
+        if self.cache_regions:
+            s3creator = S3BucketRegionCache(
+                self.session, config=conf, **init_kwargs, **client_kwargs
+            )
+            self._s3 = await s3creator.get_client()
+        else:
+            s3creator = self.session.create_client(
+                "s3", config=conf, **init_kwargs, **client_kwargs
+            )
+            self._s3 = await s3creator.__aenter__()
+
         self._s3creator = s3creator
-        self._s3 = await s3creator.__aenter__()
         # the following actually closes the aiohttp connection; use of privates
         # might break in the future, would cause exception at gc time
         if not self.asynchronous:
@@ -384,7 +420,7 @@
             try:
                 sync(loop, s3.__aexit__, None, None, None, timeout=0.1)
                 return
-            except TimeoutError:
+            except FSTimeoutError:
                 pass
         try:
             # close the actual socket
@@ -509,14 +545,20 @@
             requester_pays=requester_pays,
         )
 
-    async def _lsdir(self, path, refresh=False, max_items=None, delimiter="/"):
-        bucket, prefix, _ = self.split_path(path)
-        prefix = prefix + "/" if prefix else ""
+    async def _lsdir(
+        self, path, refresh=False, max_items=None, delimiter="/", prefix=""
+    ):
+        bucket, key, _ = self.split_path(path)
+        if not prefix:
+            prefix = ""
+        if key:
+            prefix = key.lstrip("/") + "/" + prefix
         if path not in self.dircache or refresh or not delimiter:
             try:
                 logger.debug("Get directory listing page for %s" % path)
                 await self.set_session()
-                pag = self.s3.get_paginator("list_objects_v2")
+                s3 = await self.get_s3(bucket)
+                pag = s3.get_paginator("list_objects_v2")
                 config = {}
                 if max_items is not None:
                     config.update(MaxItems=max_items, PageSize=2 * max_items)
@@ -559,10 +601,30 @@
             return files
         return self.dircache[path]
 
-    async def _find(self, path, maxdepth=None, withdirs=None, detail=False):
+    async def _find(self, path, maxdepth=None, withdirs=None, detail=False, prefix=""):
+        """List all files below path.
+        Like posix ``find`` command without conditions
+        Parameters
+        ----------
+        path : str
+        maxdepth: int or None
+            If not None, the maximum number of levels to descend
+        withdirs: bool
+            Whether to include directory paths in the output. This is True
+            when used by glob, but users usually only want files.
+        prefix: str
+            Only return files that match ``^{path}/{prefix}`` (if there is an
+            exact match ``filename == {path}/{prefix}``, it also will be included)
+        """
+        path = self._strip_protocol(path)
         bucket, key, _ = self.split_path(path)
         if not bucket:
             raise ValueError("Cannot traverse all of S3")
+        if (withdirs or maxdepth) and prefix:
+            # TODO: perhaps propagate these to a glob(f"path/{prefix}*") call
+            raise ValueError(
+                "Can not specify 'prefix' option alongside 'withdirs'/'maxdepth' options."
+            )
         if maxdepth:
             return await super()._find(
                 bucket + "/" + key, maxdepth=maxdepth, withdirs=withdirs, detail=detail
@@ -576,7 +638,7 @@
         #     elif len(out) == 0:
         #         return super().find(path)
         #     # else: we refresh anyway, having at least two missing trees
-        out = await self._lsdir(path, delimiter="")
+        out = await self._lsdir(path, delimiter="", prefix=prefix)
         if not out and key:
             try:
                 out = [await self._info(path)]
@@ -590,6 +652,7 @@
             if par not in self.dircache:
                 if par not in sdirs:
                     sdirs.add(par)
+                    d = False
                     if len(path) <= len(par):
                         d = {
                             "Key": self.split_path(par)[1],
@@ -603,19 +666,14 @@
                     thisdircache[par] = []
                     ppar = self._parent(par)
                     if ppar in thisdircache:
-                        if d not in thisdircache[ppar]:
+                        if d and d not in thisdircache[ppar]:
                             thisdircache[ppar].append(d)
             if par in sdirs:
                 thisdircache[par].append(o)
-        for k, v in thisdircache.items():
-            if k in self.dircache:
-                prev = self.dircache[k]
-                names = [p["name"] for p in prev]
-                for file in v:
-                    if v["name"] not in names:
-                        prev.append(v)
-            else:
-                self.dircache[k] = v
+        if not prefix:
+            for k, v in thisdircache.items():
+                if k not in self.dircache and len(k) >= len(path):
+                    self.dircache[k] = v
         if withdirs:
             out = sorted(out + dirs, key=lambda x: x["name"])
         if detail:
@@ -786,10 +844,8 @@
 
     async def _cat_file(self, path, version_id=None, start=None, end=None):
         bucket, key, vers = self.split_path(path)
-        if (start is None) ^ (end is None):
-            raise ValueError("Give start and end or neither")
-        if start is not None:
-            head = {"Range": "bytes=%i-%i" % (start, end - 1)}
+        if start is not None or end is not None:
+            head = {"Range": await self._process_limits(path, start, end)}
         else:
             head = {}
         resp = await self._call_s3(
@@ -844,13 +900,16 @@
 
     async def _put_file(self, lpath, rpath, chunksize=50 * 2 ** 20, **kwargs):
         bucket, key, _ = self.split_path(rpath)
-        if os.path.isdir(lpath) and key:
-            # don't make remote "directory"
-            return
+        if os.path.isdir(lpath):
+            if key:
+                # don't make remote "directory"
+                return
+            else:
+                await self._mkdir(lpath)
         size = os.path.getsize(lpath)
         with open(lpath, "rb") as f0:
             if size < min(5 * 2 ** 30, 2 * chunksize):
-                return await self._call_s3(
+                await self._call_s3(
                     "put_object", Bucket=bucket, Key=key, Body=f0, **kwargs
                 )
             else:
@@ -885,7 +944,9 @@
                     UploadId=mpu["UploadId"],
                     MultipartUpload={"Parts": parts},
                 )
-        self.invalidate_cache(rpath)
+        while rpath:
+            self.invalidate_cache(rpath)
+            rpath = self._parent(rpath)
 
     async def _get_file(self, rpath, lpath, version_id=None):
         bucket, key, vers = self.split_path(rpath)
@@ -1335,7 +1396,8 @@
         """
         bucket, key, version_id = self.split_path(path)
         await self.set_session()
-        return await self.s3.generate_presigned_url(
+        s3 = await self.get_s3(bucket)
+        return await s3.generate_presigned_url(
             ClientMethod="get_object",
             Params=dict(Bucket=bucket, Key=key, **version_id_kw(version_id), **kwargs),
             ExpiresIn=expires,
@@ -1575,6 +1637,14 @@
             self.invalidate_cache(self._parent(path))
         await self._call_s3("delete_objects", kwargs, Bucket=bucket, Delete=delete_keys)
 
+    async def _rm_file(self, path, **kwargs):
+        bucket, key, _ = self.split_path(path)
+
+        try:
+            await self._call_s3("delete_object", Bucket=bucket, Key=key)
+        except ClientError as e:
+            raise translate_boto_error(e)
+
     async def _rm(self, path, recursive=False, **kwargs):
         if recursive and isinstance(path, str):
             bucket, key, _ = self.split_path(path)
@@ -1607,7 +1677,8 @@
     async def _rm_versioned_bucket_contents(self, bucket):
         """Remove a versioned bucket and all contents"""
         await self.set_session()
-        pag = self.s3.get_paginator("list_object_versions")
+        s3 = await self.get_s3(bucket)
+        pag = s3.get_paginator("list_object_versions")
         async for plist in pag.paginate(Bucket=bucket):
             obs = plist.get("Versions", []) + plist.get("DeleteMarkers", [])
             delete_keys = {
@@ -1646,6 +1717,20 @@
     def sign(self, path, expiration=100, **kwargs):
         return self.url(path, expires=expiration, **kwargs)
 
+    async def _invalidate_region_cache(self):
+        """Invalidate the region cache (associated with buckets)
+        if ``cache_regions`` is turned on."""
+        if not self.cache_regions:
+            return None
+
+        # If the region cache is not initialized, then
+        # do nothing.
+        cache = getattr(self, "_s3creator", None)
+        if cache is not None:
+            await cache.clear()
+
+    invalidate_region_cache = sync_wrapper(_invalidate_region_cache)
+
 
 class S3File(AbstractBufferedFile):
     """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-2021.5.0/s3fs/tests/test_s3fs.py new/s3fs-2021.7.0/s3fs/tests/test_s3fs.py
--- old/s3fs-2021.5.0/s3fs/tests/test_s3fs.py   2021-05-14 16:58:47.000000000 +0200
+++ new/s3fs-2021.7.0/s3fs/tests/test_s3fs.py   2021-07-13 15:49:26.000000000 +0200
@@ -70,6 +70,14 @@
     import shlex
     import subprocess
 
+    try:
+        # should fail since we didn't start server yet
+        r = requests.get(endpoint_uri)
+    except:
+        pass
+    else:
+        if r.ok:
+            raise RuntimeError("moto server already up")
     if "AWS_SECRET_ACCESS_KEY" not in os.environ:
         os.environ["AWS_SECRET_ACCESS_KEY"] = "foo"
     if "AWS_ACCESS_KEY_ID" not in os.environ:
@@ -1785,6 +1793,18 @@
     asyncio.run(_())
 
 
+def test_cat_ranges(s3):
+    data = b"a string to select from"
+    fn = test_bucket_name + "/parts"
+    s3.pipe(fn, data)
+
+    assert s3.cat_file(fn) == data
+    assert s3.cat_file(fn, start=5) == data[5:]
+    assert s3.cat_file(fn, end=5) == data[:5]
+    assert s3.cat_file(fn, start=1, end=-1) == data[1:-1]
+    assert s3.cat_file(fn, start=-5) == data[-5:]
+
+
 @pytest.mark.skipif(sys.version_info < (3, 7), reason="no asyncio.run in py36")
 def test_async_s3_old(s3):
     async def _():
@@ -1928,7 +1948,7 @@
             pass
 
         infos = fs.find(base_dir, maxdepth=None, withdirs=True, detail=True)
-        assert len(infos) == 4
+        assert len(infos) == 5  # includes base_dir directory
 
         for info in infos.values():
             if info["name"].endswith(file_a):
@@ -1939,8 +1959,6 @@
                 assert info["type"] == "file"
             elif info["name"].rstrip("/").endswith(dir_a):
                 assert info["type"] == "directory"
-            else:
-                raise ValueError("unexpected path {}".format(info["name"]))
     finally:
         fs.rm(base_dir, recursive=True)
 
@@ -2099,3 +2117,56 @@
 
     s3.cp_file(file["name"], test_bucket_name + "/copy_tests/file2")
     assert s3.info(test_bucket_name + "/copy_tests/file2")["ETag"] is not None
+
+
+def test_find_with_prefix(s3):
+    for cursor in range(100):
+        s3.touch(test_bucket_name + f"/prefixes/test_{cursor}")
+
+    s3.touch(test_bucket_name + "/prefixes2")
+    assert len(s3.find(test_bucket_name + "/prefixes")) == 100
+    assert len(s3.find(test_bucket_name, prefix="prefixes")) == 101
+
+    assert len(s3.find(test_bucket_name + "/prefixes/test_")) == 0
+    assert len(s3.find(test_bucket_name + "/prefixes", prefix="test_")) == 100
+    assert len(s3.find(test_bucket_name + "/prefixes/", prefix="test_")) == 100
+
+    test_1s = s3.find(test_bucket_name + "/prefixes/test_1")
+    assert len(test_1s) == 1
+    assert test_1s[0] == test_bucket_name + "/prefixes/test_1"
+
+    test_1s = s3.find(test_bucket_name + "/prefixes/", prefix="test_1")
+    assert len(test_1s) == 11
+    assert test_1s == [test_bucket_name + "/prefixes/test_1"] + [
+        test_bucket_name + f"/prefixes/test_{cursor}" for cursor in range(10, 20)
+    ]
+    assert s3.find(test_bucket_name + "/prefixes/") == s3.find(
+        test_bucket_name + "/prefixes/", prefix=None
+    )
+
+
+def test_list_after_find(s3):
+    before = s3.ls("s3://test")
+    s3.invalidate_cache("s3://test/2014-01-01.csv")
+    s3.find("s3://test/2014-01-01.csv")
+    after = s3.ls("s3://test")
+    assert before == after
+
+
+def test_upload_recursive_to_bucket(s3, tmpdir):
+    # GH#491
+    folders = [os.path.join(tmpdir, d) for d in ["outer", "outer/inner"]]
+    files = [os.path.join(tmpdir, f) for f in ["outer/afile", "outer/inner/bfile"]]
+    for d in folders:
+        os.mkdir(d)
+    for f in files:
+        open(f, "w").write("hello")
+    s3.put(folders[0], "newbucket", recursive=True)
+
+
+def test_rm_file(s3):
+    target = test_bucket_name + "/to_be_removed/file"
+    s3.touch(target)
+    s3.rm_file(target)
+    assert not s3.exists(target)
+    assert not s3.exists(test_bucket_name + "/to_be_removed")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-2021.5.0/s3fs/utils.py new/s3fs-2021.7.0/s3fs/utils.py
--- old/s3fs-2021.5.0/s3fs/utils.py     2021-03-31 20:27:02.000000000 +0200
+++ new/s3fs-2021.7.0/s3fs/utils.py     2021-06-23 14:49:30.000000000 +0200
@@ -1,5 +1,10 @@
 import errno
+import logging
 from contextlib import contextmanager
+from botocore.exceptions import ClientError
+
+
+logger = logging.getLogger("s3fs")
 
 
 @contextmanager
@@ -10,6 +15,99 @@
         pass
 
 
+try:
+    from contextlib import AsyncExitStack
+except ImportError:
+    # Since AsyncExitStack is not available for 3.6<=
+    # we'll create a simple implementation that imitates
+    # the basic functionality.
+    class AsyncExitStack:
+        def __init__(self):
+            self.contexts = []
+
+        async def enter_async_context(self, context):
+            self.contexts.append(context)
+            return await context.__aenter__()
+
+        async def aclose(self, *args):
+            args = args or (None, None, None)
+            for context in self.contexts:
+                await context.__aexit__(*args)
+
+        async def __aenter__(self):
+            return self
+
+        async def __aexit__(self, *args):
+            await self.aclose(*args)
+
+
+class S3BucketRegionCache:
+    # See https://github.com/aio-libs/aiobotocore/issues/866
+    # for details.
+
+    def __init__(self, session, **client_kwargs):
+        self._session = session
+        self._stack = AsyncExitStack()
+        self._client = None
+        self._client_kwargs = client_kwargs
+        self._buckets = {}
+        self._regions = {}
+
+    async def get_bucket_client(self, bucket_name=None):
+        if bucket_name in self._buckets:
+            return self._buckets[bucket_name]
+
+        general_client = await self.get_client()
+        if bucket_name is None:
+            return general_client
+
+        try:
+            response = await general_client.head_bucket(Bucket=bucket_name)
+        except ClientError:
+            logger.debug(
+                "RC: HEAD_BUCKET call for %r has failed, returning the general client",
+                bucket_name,
+            )
+            return general_client
+        else:
+            region = response["ResponseMetadata"]["HTTPHeaders"]["x-amz-bucket-region"]
+
+        if region not in self._regions:
+            logger.debug(
+                "RC: Creating a new regional client for %r on the region %r",
+                bucket_name,
+                region,
+            )
+            self._regions[region] = await self._stack.enter_async_context(
+                self._session.create_client(
+                    "s3", region_name=region, **self._client_kwargs
+                )
+            )
+
+        client = self._buckets[bucket_name] = self._regions[region]
+        return client
+
+    async def get_client(self):
+        if not self._client:
+            self._client = await self._stack.enter_async_context(
+                self._session.create_client("s3", **self._client_kwargs)
+            )
+        return self._client
+
+    async def clear(self):
+        logger.debug("RC: discarding all clients")
+        self._buckets.clear()
+        self._regions.clear()
+        self._client = None
+        await self._stack.aclose()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *exc_args):
+        await self.clear()
+
+
 class FileExpired(IOError):
     """
     Is raised, when the file content has been changed from a different process after
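
S3BucketRegionCache above is the helper behind the new cache_regions option: it keeps one aiobotocore client per region and maps each bucket to the right client by reading the x-amz-bucket-region header from a head_bucket call. For illustration only (it is an internal class, and the bucket name is a placeholder), it can be driven directly from async code roughly like this:

    import asyncio
    import aiobotocore
    from s3fs.utils import S3BucketRegionCache

    async def main():
        session = aiobotocore.AioSession()
        async with S3BucketRegionCache(session) as cache:
            # Resolves the bucket's region once, then reuses the cached regional client
            client = await cache.get_bucket_client("some-bucket")
            resp = await client.list_objects_v2(Bucket="some-bucket", MaxKeys=5)
            print([obj["Key"] for obj in resp.get("Contents", [])])

    asyncio.run(main())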
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-2021.5.0/s3fs.egg-info/PKG-INFO new/s3fs-2021.7.0/s3fs.egg-info/PKG-INFO
--- old/s3fs-2021.5.0/s3fs.egg-info/PKG-INFO    2021-05-14 17:01:12.000000000 +0200
+++ new/s3fs-2021.7.0/s3fs.egg-info/PKG-INFO    2021-07-13 15:59:25.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: s3fs
-Version: 2021.5.0
+Version: 2021.7.0
 Summary: Convenient Filesystem interface over S3
 Home-page: http://github.com/dask/s3fs/
 Maintainer: Martin Durant
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/s3fs-2021.5.0/s3fs.egg-info/requires.txt new/s3fs-2021.7.0/s3fs.egg-info/requires.txt
--- old/s3fs-2021.5.0/s3fs.egg-info/requires.txt        2021-05-14 17:01:12.000000000 +0200
+++ new/s3fs-2021.7.0/s3fs.egg-info/requires.txt        2021-07-13 15:59:25.000000000 +0200
@@ -1,5 +1,5 @@
 aiobotocore>=1.0.1
-fsspec==2021.05.0
+fsspec==2021.07.0
 
 [awscli]
 aiobotocore[awscli]
