Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-dogpile.cache for openSUSE:Factory checked in at 2024-03-18 16:47:27
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-dogpile.cache (Old)
 and      /work/SRC/openSUSE:Factory/.python-dogpile.cache.new.1905 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-dogpile.cache"

Mon Mar 18 16:47:27 2024 rev:42 rq:1158983 version:1.3.2

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-dogpile.cache/python-dogpile.cache.changes       2024-02-08 19:03:33.822131284 +0100
+++ /work/SRC/openSUSE:Factory/.python-dogpile.cache.new.1905/python-dogpile.cache.changes     2024-03-18 16:47:52.482295886 +0100
@@ -1,0 +2,11 @@
+Mon Mar 18 12:15:51 UTC 2024 - Dirk Müller <dmuel...@suse.com>
+
+- update to 1.3.2:
+  * Added a new backend RedisClusterBackend, allowing support for
+    Redis Cluster.
+  * Added support for additional Redis client parameters
+    RedisBackend.socket_connect_timeout,
+    RedisBackend.socket_keepalive and
+    RedisBackend.socket_keepalive_options.
+
+-------------------------------------------------------------------
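
The options named in this changelog entry can be passed straight through a
region configuration. A rough sketch, not taken from the package sources: it
assumes a Redis server on 127.0.0.1:6379, redis-py installed, and a platform
that defines socket.TCP_KEEPIDLE (e.g. Linux):

    import socket

    from dogpile.cache import make_region

    region = make_region().configure(
        "dogpile.cache.redis",
        expiration_time=3600,
        arguments={
            "host": "127.0.0.1",
            "port": 6379,
            "db": 0,
            # pre-existing read/write timeout vs. the new connect timeout
            "socket_timeout": 5.0,
            "socket_connect_timeout": 1.0,
            # new in 1.3.2: enable and tune TCP keepalive on the connection
            "socket_keepalive": True,
            "socket_keepalive_options": {socket.TCP_KEEPIDLE: 60},
        },
    )

    value = region.get_or_create("some-key", lambda: "expensive value")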

Old:
----
  dogpile.cache-1.3.1.tar.gz

New:
----
  dogpile.cache-1.3.2.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-dogpile.cache.spec ++++++
--- /var/tmp/diff_new_pack.OEBt85/_old  2024-03-18 16:47:53.126319630 +0100
+++ /var/tmp/diff_new_pack.OEBt85/_new  2024-03-18 16:47:53.130319777 +0100
@@ -18,10 +18,10 @@
 
 %{?sle15_python_module_pythons}
 Name:           python-dogpile.cache
-Version:        1.3.1
+Version:        1.3.2
 Release:        0
 %define modname dogpile.cache
-%define modver  1_3_1
+%define modver  1_3_2
 Summary:        A caching front-end based on the Dogpile lock
 License:        BSD-3-Clause
 URL:            https://github.com/sqlalchemy/dogpile.cache

++++++ dogpile.cache-1.3.1.tar.gz -> dogpile.cache-1.3.2.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/dogpile.cache-rel_1_3_1/docs/build/changelog.rst new/dogpile.cache-rel_1_3_2/docs/build/changelog.rst
--- old/dogpile.cache-rel_1_3_1/docs/build/changelog.rst        2024-02-07 23:54:36.000000000 +0100
+++ new/dogpile.cache-rel_1_3_2/docs/build/changelog.rst        2024-02-21 20:40:59.000000000 +0100
@@ -3,6 +3,28 @@
 =========
 
 .. changelog::
+    :version: 1.3.2
+    :released: Wed Feb 21 2024
+
+    .. change::
+        :tags: usecase, redis
+        :tickets: 250
+
+        Added a new backend :class:`.RedisClusterBackend`, allowing support for
+        Redis Cluster.  Pull request courtesy Maël Naccache Tüfekçi.
+
+
+    .. change::
+        :tags: usecase, redis
+        :tickets: 252
+
+        Added support for additional Redis client parameters
+        :paramref:`.RedisBackend.socket_connect_timeout`,
+        :paramref:`.RedisBackend.socket_keepalive` and
+        :paramref:`.RedisBackend.socket_keepalive_options`. Pull request courtesy
+        Takashi Kajinami.
+
+.. changelog::
     :version: 1.3.1
     :released: Wed Feb 7 2024
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/dogpile.cache-rel_1_3_1/docs/build/conf.py new/dogpile.cache-rel_1_3_2/docs/build/conf.py
--- old/dogpile.cache-rel_1_3_1/docs/build/conf.py      2024-02-07 23:54:36.000000000 +0100
+++ new/dogpile.cache-rel_1_3_2/docs/build/conf.py      2024-02-21 20:40:59.000000000 +0100
@@ -74,7 +74,7 @@
 # The short X.Y version.
 version = dogpile.__version__
 # The full version, including alpha/beta/rc tags.
-release = "1.3.1"
+release = "1.3.2"
 
 
 # The language for content autogenerated by Sphinx. Refer to documentation
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/dogpile.cache-rel_1_3_1/dogpile/__init__.py new/dogpile.cache-rel_1_3_2/dogpile/__init__.py
--- old/dogpile.cache-rel_1_3_1/dogpile/__init__.py     2024-02-07 23:54:36.000000000 +0100
+++ new/dogpile.cache-rel_1_3_2/dogpile/__init__.py     2024-02-21 20:40:59.000000000 +0100
@@ -1,4 +1,4 @@
-__version__ = "1.3.1"
+__version__ = "1.3.2"
 
 from .lock import Lock  # noqa
 from .lock import NeedRegenerationException  # noqa
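
With the update applied, the installed package reports the new version through
this attribute, e.g.:

    import dogpile

    assert dogpile.__version__ == "1.3.2"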
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/dogpile.cache-rel_1_3_1/dogpile/cache/backends/__init__.py new/dogpile.cache-rel_1_3_2/dogpile/cache/backends/__init__.py
--- old/dogpile.cache-rel_1_3_1/dogpile/cache/backends/__init__.py      2024-02-07 23:54:36.000000000 +0100
+++ new/dogpile.cache-rel_1_3_2/dogpile/cache/backends/__init__.py      2024-02-21 20:40:59.000000000 +0100
@@ -45,3 +45,8 @@
     "dogpile.cache.backends.redis",
     "RedisSentinelBackend",
 )
+register_backend(
+    "dogpile.cache.redis_cluster",
+    "dogpile.cache.backends.redis",
+    "RedisClusterBackend",
+)
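
register_backend() maps a configure() key to a module and class name that are
imported lazily, which is what makes "dogpile.cache.redis_cluster" resolvable
above. The same mechanism can be sketched with a hypothetical toy backend (the
key "example.dict" and the DictBackend class are illustration only, not part
of dogpile.cache):

    from dogpile.cache import make_region
    from dogpile.cache.api import NO_VALUE, CacheBackend
    from dogpile.cache.region import register_backend


    class DictBackend(CacheBackend):
        """Toy backend keeping values in a plain dict."""

        def __init__(self, arguments):
            self._store = {}

        def get(self, key):
            return self._store.get(key, NO_VALUE)

        def set(self, key, value):
            self._store[key] = value

        def delete(self, key):
            self._store.pop(key, None)


    # Same call shape as the redis_cluster registration above:
    # (configure() key, module to import lazily, class name inside it).
    # "__main__" works when this sketch is run as a script.
    register_backend("example.dict", "__main__", "DictBackend")

    region = make_region().configure("example.dict")
    region.set("k", "v")
    assert region.get("k") == "v"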
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/dogpile.cache-rel_1_3_1/dogpile/cache/backends/redis.py new/dogpile.cache-rel_1_3_2/dogpile/cache/backends/redis.py
--- old/dogpile.cache-rel_1_3_1/dogpile/cache/backends/redis.py 2024-02-07 23:54:36.000000000 +0100
+++ new/dogpile.cache-rel_1_3_2/dogpile/cache/backends/redis.py 2024-02-21 20:40:59.000000000 +0100
@@ -18,12 +18,12 @@
     # delayed import
     redis = None  # noqa F811
 
-__all__ = ("RedisBackend", "RedisSentinelBackend")
+__all__ = ("RedisBackend", "RedisSentinelBackend", "RedisClusterBackend")
 
 
 class RedisBackend(BytesBackend):
     r"""A `Redis <http://redis.io/>`_ backend, using the
-    `redis-py <http://pypi.python.org/pypi/redis/>`_ backend.
+    `redis-py <http://pypi.python.org/pypi/redis/>`_ driver.
 
     Example configuration::
 
@@ -52,6 +52,8 @@
 
     :param username: string, default is no username.
 
+     .. versionadded:: 1.3.1
+
     :param password: string, default is no password.
 
     :param port: integer, default is ``6379``.
@@ -74,6 +76,21 @@
     :param socket_timeout: float, seconds for socket timeout.
      Default is None (no timeout).
 
+    :param socket_connect_timeout: float, seconds for socket connection
+     timeout. Default is None (no timeout).
+
+     .. versionadded:: 1.3.2
+
+    :param socket_keepalive: boolean, when True, socket keepalive is enabled.
+     Default is False.
+
+     .. versionadded:: 1.3.2
+
+    :param socket_keepalive_options: dict, socket keepalive options.
+     Default is None (no options).
+
+     .. versionadded:: 1.3.2
+
     :param lock_sleep: integer, number of seconds to sleep when failed to
      acquire a lock.  This argument is only valid when
      ``distributed_lock`` is ``True``.
@@ -95,9 +112,10 @@
      directly, including parameters like ``ssl``, ``ssl_certfile``,
      ``charset``, etc.
 
-     .. versionadded:: 1.1.6  Added ``connection_kwargs`` parameter.
+     .. versionadded:: 1.1.6
+
+
 
-     .. versionadded:: 1.3.1  Added ``username`` parameter.
 
     """
 
@@ -112,6 +130,13 @@
         self.db = arguments.pop("db", 0)
         self.distributed_lock = arguments.pop("distributed_lock", False)
         self.socket_timeout = arguments.pop("socket_timeout", None)
+        self.socket_connect_timeout = arguments.pop(
+            "socket_connect_timeout", None
+        )
+        self.socket_keepalive = arguments.pop("socket_keepalive", False)
+        self.socket_keepalive_options = arguments.pop(
+            "socket_keepalive_options", None
+        )
         self.lock_timeout = arguments.pop("lock_timeout", None)
         self.lock_sleep = arguments.pop("lock_sleep", 0.1)
         self.thread_local_lock = arguments.pop("thread_local_lock", True)
@@ -144,8 +169,16 @@
         else:
             args = {}
             args.update(self.connection_kwargs)
-            if self.socket_timeout:
+            if self.socket_timeout is not None:
                 args["socket_timeout"] = self.socket_timeout
+            if self.socket_connect_timeout is not None:
+                args["socket_connect_timeout"] = self.socket_connect_timeout
+            if self.socket_keepalive:
+                args["socket_keepalive"] = True
+                if self.socket_keepalive_options is not None:
+                    args[
+                        "socket_keepalive_options"
+                    ] = self.socket_keepalive_options
 
             if self.url is not None:
                 args.update(url=self.url)
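
When neither connection_pool nor url is supplied, the arguments assembled in
this hunk amount to an ordinary redis-py constructor call, roughly as follows
(placeholder values; assumes redis-py installed and socket.TCP_KEEPIDLE
available, e.g. on Linux):

    import socket

    import redis

    # Roughly what the backend builds in the branch above; StrictRedis is an
    # alias of redis.Redis in current redis-py.
    client = redis.StrictRedis(
        host="127.0.0.1",
        port=6379,
        db=0,
        socket_timeout=5.0,
        socket_connect_timeout=1.0,
        socket_keepalive=True,
        socket_keepalive_options={socket.TCP_KEEPIDLE: 60},
    )
    client.ping()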
@@ -227,8 +260,9 @@
 
 class RedisSentinelBackend(RedisBackend):
     """A `Redis <http://redis.io/>`_ backend, using the
-    `redis-py <http://pypi.python.org/pypi/redis/>`_ backend.
-    It will use the Sentinel of a Redis cluster.
+    `redis-py <http://pypi.python.org/pypi/redis/>`_ driver.
+    This backend is to be used when using
+    `Redis Sentinel <https://redis.io/docs/management/sentinel/>`_.
 
     .. versionadded:: 1.0.0
 
@@ -255,6 +289,8 @@
 
     :param username: string, default is no username.
 
+     .. versionadded:: 1.3.1
+
     :param password: string, default is no password.
 
     :param db: integer, default is ``0``.
@@ -275,6 +311,21 @@
     :param socket_timeout: float, seconds for socket timeout.
      Default is None (no timeout).
 
+    :param socket_connect_timeout: float, seconds for socket connection
+     timeout.  Default is None (no timeout).
+
+     .. versionadded:: 1.3.2
+
+    :param socket_keepalive: boolean, when True, socket keepalive is enabled.
+     Default is False.
+
+     .. versionadded:: 1.3.2
+
+    :param socket_keepalive_options: dict, socket keepalive options.
+     Default is {} (no options).
+
+     .. versionadded:: 1.3.2
+
     :param sentinels: is a list of sentinel nodes. Each node is represented by
      a pair (hostname, port).
      Default is None (not in sentinel mode).
@@ -302,7 +353,6 @@
      asynchronous runners, as they run in a different thread than the one
      used to create the lock.
 
-     .. versionadded:: 1.3.1  Added ``username`` parameter.
 
     """
 
@@ -342,6 +392,16 @@
             sentinel_kwargs.setdefault("db", self.db)
         if self.socket_timeout is not None:
             connection_kwargs.setdefault("socket_timeout", self.socket_timeout)
+        if self.socket_connect_timeout is not None:
+            connection_kwargs.setdefault(
+                "socket_connect_timeout", self.socket_connect_timeout
+            )
+        if self.socket_keepalive:
+            connection_kwargs.setdefault("socket_keepalive", True)
+            if self.socket_keepalive_options is not None:
+                connection_kwargs.setdefault(
+                    "socket_keepalive_options", self.socket_keepalive_options
+                )
 
         sentinel = redis.sentinel.Sentinel(
             self.sentinels,
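
A configuration sketch for the Sentinel backend with the newly forwarded
options, assuming a Sentinel listening on localhost:26379 that manages a
service named "mymaster" (both values are placeholders):

    from dogpile.cache import make_region

    region = make_region().configure(
        "dogpile.cache.redis_sentinel",
        expiration_time=3600,
        arguments={
            "sentinels": [("localhost", 26379)],
            "service_name": "mymaster",
            "db": 0,
            "socket_timeout": 5.0,
            # new in 1.3.2, applied to the master/replica connections above
            "socket_connect_timeout": 1.0,
            "socket_keepalive": True,
        },
    )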
@@ -350,3 +410,160 @@
         )
         self.writer_client = sentinel.master_for(self.service_name)
         self.reader_client = sentinel.slave_for(self.service_name)
+
+
+class RedisClusterBackend(RedisBackend):
+    r"""A `Redis <http://redis.io/>`_ backend, using the
+    `redis-py <http://pypi.python.org/pypi/redis/>`_ driver.
+    This backend is to be used when connecting to a
+    `Redis Cluster <https://redis.io/docs/management/scaling/>`_ which
+    will use the
+    `RedisCluster Client
+    <https://redis.readthedocs.io/en/stable/connections.html#cluster-client>`_.
+
+    .. seealso::
+
+        `Clustering <https://redis.readthedocs.io/en/stable/clustering.html>`_
+        in the redis-py documentation.
+
+    Requires redis-py version >=4.1.0.
+
+    .. versionadded:: 1.3.2
+
+    Connecting to the cluster requires one of:
+
+    * Passing a list of startup nodes
+    * Passing only one node of the cluster, Redis will use automatic discovery
+      to find the other nodes.
+
+    Example configuration, using startup nodes::
+
+        from dogpile.cache import make_region
+        from redis.cluster import ClusterNode
+
+        region = make_region().configure(
+            'dogpile.cache.redis_cluster',
+            arguments = {
+                "startup_nodes": [
+                    ClusterNode('localhost', 6379),
+                    ClusterNode('localhost', 6378)
+                ]
+            }
+        )
+
+    It is recommended to use startup nodes, so that connections will be
+    successful as at least one node will always be present.  Connection
+    arguments such as password, username or
+    CA certificate may be passed using ``connection_kwargs``::
+
+        from dogpile.cache import make_region
+        from redis.cluster import ClusterNode
+
+        connection_kwargs = {
+            "username": "admin",
+            "password": "averystrongpassword",
+            "ssl": True,
+            "ssl_ca_certs": "redis.pem",
+        }
+
+        nodes = [
+            ClusterNode("localhost", 6379),
+            ClusterNode("localhost", 6380),
+            ClusterNode("localhost", 6381),
+        ]
+
+        region = make_region().configure(
+            "dogpile.cache.redis_cluster",
+            arguments={
+                "startup_nodes": nodes,
+                "connection_kwargs": connection_kwargs,
+            },
+        )
+
+    Passing a URL to one node only will allow the driver to discover the whole
+    cluster automatically::
+
+        from dogpile.cache import make_region
+
+        region = make_region().configure(
+            'dogpile.cache.redis_cluster',
+            arguments = {
+                "url": "localhost:6379/0"
+            }
+        )
+
+    A caveat of the above approach is that if the single node targeted
+    is not available, this would prevent the connection from being successful.
+
+    Parameters accepted include:
+
+    :param startup_nodes: List of ClusterNode. The list of nodes in
+     the cluster that the client will try to connect to.
+
+    :param url: string. If provided, will override separate
+     host/password/port/db params.  The format is that accepted by
+     ``RedisCluster.from_url()``.
+
+    :param db: integer, default is ``0``.
+
+    :param redis_expiration_time: integer, number of seconds after setting
+     a value that Redis should expire it.  This should be larger than dogpile's
+     cache expiration.  By default no expiration is set.
+
+    :param distributed_lock: boolean, when True, will use a
+     redis-lock as the dogpile lock. Use this when multiple processes will be
+     talking to the same redis instance. When left at False, dogpile will
+     coordinate on a regular threading mutex.
+
+    :param lock_timeout: integer, number of seconds after acquiring a lock that
+     Redis should expire it.  This argument is only valid when
+     ``distributed_lock`` is ``True``.
+
+    :param socket_timeout: float, seconds for socket timeout.
+     Default is None (no timeout).
+
+    :param socket_connect_timeout: float, seconds for socket connection
+     timeout.  Default is None (no timeout).
+
+    :param socket_keepalive: boolean, when True, socket keepalive is enabled.
+     Default is False.
+
+    :param lock_sleep: integer, number of seconds to sleep when failed to
+     acquire a lock.  This argument is only valid when
+     ``distributed_lock`` is ``True``.
+
+    :param thread_local_lock: bool, whether a thread-local Redis lock object
+     should be used. This is the default, but is not compatible with
+     asynchronous runners, as they run in a different thread than the one
+     used to create the lock.
+
+    :param connection_kwargs: dict, additional keyword arguments are passed
+     along to the
+     ``RedisCluster.from_url()`` method or ``RedisCluster()`` constructor
+     directly, including parameters like ``ssl``, ``ssl_certfile``,
+     ``charset``, etc.
+
+    """
+
+    def __init__(self, arguments):
+        arguments = arguments.copy()
+        self.startup_nodes = arguments.pop("startup_nodes", None)
+        super().__init__(arguments)
+
+    def _imports(self):
+        global redis
+        import redis.cluster
+
+    def _create_client(self):
+        redis_cluster: redis.cluster.RedisCluster[typing.Any]
+        if self.url is not None:
+            redis_cluster = redis.cluster.RedisCluster.from_url(
+                self.url, **self.connection_kwargs
+            )
+        else:
+            redis_cluster = redis.cluster.RedisCluster(
+                startup_nodes=self.startup_nodes,
+                **self.connection_kwargs,
+            )
+        self.writer_client = typing.cast(redis.Redis[bytes], redis_cluster)
+        self.reader_client = self.writer_client
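
Beyond the configuration examples in the docstring above, the new cluster
backend plugs into the usual region decorators. A sketch, assuming
dogpile.cache 1.3.2, redis-py >= 4.1.0 and a cluster node reachable at the
placeholder URL:

    from dogpile.cache import make_region

    region = make_region().configure(
        "dogpile.cache.redis_cluster",
        expiration_time=300,
        arguments={
            # single-node URL form; the driver discovers the rest of the
            # cluster, with the availability caveat noted in the docstring
            "url": "redis://cache-node-1:6379",
            # coordinate regeneration across processes with a Redis-held lock
            "distributed_lock": True,
            "thread_local_lock": False,
        },
    )


    @region.cache_on_arguments()
    def load_user(user_id):
        # placeholder for an expensive lookup
        return {"id": user_id}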
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/dogpile.cache-rel_1_3_1/tests/cache/test_redis_backend.py new/dogpile.cache-rel_1_3_2/tests/cache/test_redis_backend.py
--- old/dogpile.cache-rel_1_3_1/tests/cache/test_redis_backend.py       2024-02-07 23:54:36.000000000 +0100
+++ new/dogpile.cache-rel_1_3_2/tests/cache/test_redis_backend.py       2024-02-21 20:40:59.000000000 +0100
@@ -202,6 +202,41 @@
         expected.update({"username": None, "password": None})
         self._test_helper(MockStrictRedis, expected, arguments)
 
+    def test_connect_with_socket_connect_timeout(self, MockStrictRedis):
+        arguments = {
+            "host": "127.0.0.1",
+            "port": 6379,
+            "socket_timeout": 1.0,
+            "db": 0,
+        }
+        expected = arguments.copy()
+        expected.update({"username": None, "password": None})
+        self._test_helper(MockStrictRedis, expected, arguments)
+
+    def test_connect_with_socket_keepalive(self, MockStrictRedis):
+        arguments = {
+            "host": "127.0.0.1",
+            "port": 6379,
+            "socket_keepalive": True,
+            "db": 0,
+        }
+        expected = arguments.copy()
+        expected.update({"username": None, "password": None})
+        self._test_helper(MockStrictRedis, expected, arguments)
+
+    def test_connect_with_socket_keepalive_options(self, MockStrictRedis):
+        arguments = {
+            "host": "127.0.0.1",
+            "port": 6379,
+            "socket_keepalive": True,
+            # 4 = socket.TCP_KEEPIDLE
+            "socket_keepalive_options": {4, 10.0},
+            "db": 0,
+        }
+        expected = arguments.copy()
+        expected.update({"username": None, "password": None})
+        self._test_helper(MockStrictRedis, expected, arguments)
+
     def test_connect_with_connection_pool(self, MockStrictRedis):
         pool = Mock()
         arguments = {"connection_pool": pool, "socket_timeout": 0.5}
