Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package matrix-synapse for openSUSE:Factory checked in at 2026-05-08 16:43:14
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/matrix-synapse (Old)
 and      /work/SRC/openSUSE:Factory/.matrix-synapse.new.1966 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "matrix-synapse"

Fri May  8 16:43:14 2026 rev:156 rq:1351450 version:1.152.1

Changes:
--------
--- /work/SRC/openSUSE:Factory/matrix-synapse/matrix-synapse.changes    
2026-04-29 19:19:09.952330820 +0200
+++ /work/SRC/openSUSE:Factory/.matrix-synapse.new.1966/matrix-synapse.changes  
2026-05-08 16:43:58.733696591 +0200
@@ -1,0 +2,13 @@
+Thu May  7 15:16:16 UTC 2026 - Marcus Rueckert <[email protected]>
+
+- Update to 1.152.1 (boo#1264445)
+  - Prevent CPU starvation (Denial of Service) under worker lock
+    contention, additionally capping the WorkerLock time out
+    interval to a maximum of 60 seconds. Contributed by Famedly.
+    (#19394, ELEMENTSEC-2026-1706, GHSA-8q93-326v-3m7g, CVE
+    pending)
+  - Prevent pagination ending when a page is full of rejected
+    events. (ELEMENTSEC-2025-1636, GHSA-6qf2-7x63-mm6v, CVE
+    pending)
+
+-------------------------------------------------------------------

Old:
----
  matrix-synapse-1.152.0.obscpio

New:
----
  matrix-synapse-1.152.1.obscpio

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ matrix-synapse-test.spec ++++++
--- /var/tmp/diff_new_pack.pTffUE/_old  2026-05-08 16:44:06.082002712 +0200
+++ /var/tmp/diff_new_pack.pTffUE/_new  2026-05-08 16:44:06.086002879 +0200
@@ -27,7 +27,7 @@
 
 %define         pkgname matrix-synapse
 Name:           %{pkgname}-test
-Version:        1.152.0
+Version:        1.152.1
 Release:        0
 Summary:        Test package for %{pkgname}
 License:        AGPL-3.0-or-later

++++++ matrix-synapse.spec ++++++
--- /var/tmp/diff_new_pack.pTffUE/_old  2026-05-08 16:44:06.118004212 +0200
+++ /var/tmp/diff_new_pack.pTffUE/_new  2026-05-08 16:44:06.122004379 +0200
@@ -227,7 +227,7 @@
 %define         pkgname matrix-synapse
 %define         eggname matrix_synapse
 Name:           %{pkgname}
-Version:        1.152.0
+Version:        1.152.1
 Release:        0
 Summary:        Matrix protocol reference homeserver
 License:        AGPL-3.0-or-later

++++++ _service ++++++
--- /var/tmp/diff_new_pack.pTffUE/_old  2026-05-08 16:44:06.198007545 +0200
+++ /var/tmp/diff_new_pack.pTffUE/_new  2026-05-08 16:44:06.202007711 +0200
@@ -4,7 +4,7 @@
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="url">https://github.com/element-hq/synapse.git</param>
     <param name="scm">git</param>
-    <param name="revision">v1.152.0</param>
+    <param name="revision">v1.152.1</param>
     <param name="versionrewrite-pattern">v(.*)</param>
     <param name="versionrewrite-replacement">\1</param>
     <!--

++++++ matrix-synapse-1.152.0.obscpio -> matrix-synapse-1.152.1.obscpio ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/matrix-synapse-1.152.0/CHANGES.md 
new/matrix-synapse-1.152.1/CHANGES.md
--- old/matrix-synapse-1.152.0/CHANGES.md       2026-04-28 14:45:53.000000000 
+0200
+++ new/matrix-synapse-1.152.1/CHANGES.md       2026-05-07 14:49:49.000000000 
+0200
@@ -1,3 +1,11 @@
+# Synapse 1.152.1 (2026-05-07)
+
+## Security Fixes
+
+- Prevent CPU starvation (Denial of Service) under worker lock contention, additionally capping the `WorkerLock` time out interval to a maximum of 60 seconds. Contributed by Famedly. ([\#19394](https://github.com/element-hq/synapse/issues/19394), ELEMENTSEC-2026-1706, [GHSA-8q93-326v-3m7g](https://github.com/element-hq/synapse/security/advisories/GHSA-8q93-326v-3m7g), CVE pending)
+- Prevent pagination ending when a page is full of rejected events. (ELEMENTSEC-2025-1636, [GHSA-6qf2-7x63-mm6v](https://github.com/element-hq/synapse/security/advisories/GHSA-6qf2-7x63-mm6v), CVE pending)
+
+
 # Synapse 1.152.0 (2026-04-28)
 
 No significant changes since 1.152.0rc1.
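
The first security fix above replaces an unbounded doubling of the lock retry timeout with one that saturates at 60 seconds while keeping jitter. Below is a minimal standalone sketch of that back-off pattern; the constants mirror the worker_lock.py diff further down, but the function name and demo loop are illustrative only, not the Synapse implementation.

    import random

    # Illustrative constants, mirroring the spirit of the 1.152.1 change:
    # the wait between lock retries doubles, but saturates at 60 seconds.
    INITIAL_INTERVAL_S = 0.1
    MAX_INTERVAL_S = 60.0

    def next_timeout_interval(current_s: float) -> float:
        """Double the retry timeout, cap it, and add +/-10% jitter.

        Without the cap, repeated doubling grows without bound (0.1 s, 0.2 s,
        0.4 s, ... past an hour after 16 doublings), which is how lock
        contention could starve workers. The jitter helps avoid a "thundering
        herd" of waiters all waking at the same instant.
        """
        capped = min(MAX_INTERVAL_S, current_s * 2)
        return capped * random.uniform(0.9, 1.1)

    if __name__ == "__main__":
        interval = INITIAL_INTERVAL_S
        for attempt in range(12):
            interval = next_timeout_interval(interval)
            print(f"retry {attempt + 1}: wait ~{interval:.1f}s")
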
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/matrix-synapse-1.152.0/debian/changelog 
new/matrix-synapse-1.152.1/debian/changelog
--- old/matrix-synapse-1.152.0/debian/changelog 2026-04-28 14:45:53.000000000 
+0200
+++ new/matrix-synapse-1.152.1/debian/changelog 2026-05-07 14:49:49.000000000 
+0200
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.152.1) stable; urgency=medium
+
+  * New Synapse release 1.152.1.
+
+ -- Synapse Packaging team <[email protected]>  Thu, 07 May 2026 13:29:05 +0100
+
 matrix-synapse-py3 (1.152.0) stable; urgency=medium
 
   * New Synapse release 1.152.0.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/matrix-synapse-1.152.0/pyproject.toml 
new/matrix-synapse-1.152.1/pyproject.toml
--- old/matrix-synapse-1.152.0/pyproject.toml   2026-04-28 14:45:53.000000000 
+0200
+++ new/matrix-synapse-1.152.1/pyproject.toml   2026-05-07 14:49:49.000000000 
+0200
@@ -1,6 +1,6 @@
 [project]
 name = "matrix-synapse"
-version = "1.152.0"
+version = "1.152.1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 readme = "README.rst"
 authors = [
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/matrix-synapse-1.152.0/synapse/handlers/pagination.py 
new/matrix-synapse-1.152.1/synapse/handlers/pagination.py
--- old/matrix-synapse-1.152.0/synapse/handlers/pagination.py   2026-04-28 
14:45:53.000000000 +0200
+++ new/matrix-synapse-1.152.1/synapse/handlers/pagination.py   2026-05-07 
14:49:49.000000000 +0200
@@ -566,7 +566,7 @@
         (
             events,
             next_key,
-            _,
+            limited,
         ) = await self.store.paginate_room_events_by_topological_ordering(
             room_id=room_id,
             from_key=from_token.room_key,
@@ -645,7 +645,7 @@
                     (
                         events,
                         next_key,
-                        _,
+                        limited,
                     ) = await self.store.paginate_room_events_by_topological_ordering(
                         room_id=room_id,
                         from_key=from_token.room_key,
@@ -668,11 +668,12 @@
 
         next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key)
 
-        # if no events are returned from pagination, that implies
-        # we have reached the end of the available events.
+        # if no events are returned from pagination (this page is empty)
+        # and there aren't any more pages (not limited),
+        # that implies we have reached the end of the available events.
         # In that case we do not return end, to tell the client
         # there is no need for further queries.
-        if not events:
+        if not limited and not events:
             return GetMessagesResult(
                 messages_chunk=[],
                 bundled_aggregations={},
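
The one-line condition change above is the substance of the pagination fix. A simplified, hypothetical sketch of that decision follows; names such as Page and end_token_for_client are invented for illustration, and the real logic is the handler code in the hunk above.

    from dataclasses import dataclass

    @dataclass
    class Page:
        events: list       # events that survived filtering (rejected events removed)
        next_token: str    # where the next /messages request should resume
        limited: bool      # True if the store hit its row limit, so more rows may exist

    def end_token_for_client(page: Page):
        """Return the `end` token to hand back to a /messages caller, or None.

        The pre-1.152.1 logic stopped as soon as `events` was empty, so a page
        consisting only of rejected (filtered-out) events looked like the end of
        history. The fix: only omit `end` when the page is empty *and* the store
        says it was not limited, i.e. there is genuinely nothing left to fetch.
        """
        if not page.limited and not page.events:
            return None  # true end of history: the client should stop paginating
        return page.next_token

    # A page whose events were all rejected, but more rows exist: keep paginating.
    print(end_token_for_client(Page(events=[], next_token="t42", limited=True)))   # t42
    # A genuinely empty, un-limited page: signal the end by omitting `end`.
    print(end_token_for_client(Page(events=[], next_token="t99", limited=False)))  # None
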
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/matrix-synapse-1.152.0/synapse/handlers/worker_lock.py 
new/matrix-synapse-1.152.1/synapse/handlers/worker_lock.py
--- old/matrix-synapse-1.152.0/synapse/handlers/worker_lock.py  2026-04-28 
14:45:53.000000000 +0200
+++ new/matrix-synapse-1.152.1/synapse/handlers/worker_lock.py  2026-05-07 
14:49:49.000000000 +0200
@@ -54,6 +54,9 @@
 # will not disappear under our feet as long as we don't delete the room.
 NEW_EVENT_DURING_PURGE_LOCK_NAME = "new_event_during_purge_lock"
 
+WORKER_LOCK_MAX_RETRY_INTERVAL = Duration(seconds=60)
+WORKER_LOCK_EXCESSIVE_WAITING_WARN_DURATION = Duration(minutes=10)
+
 
 class WorkerLocksHandler:
     """A class for waiting on taking out locks, rather than using the storage
@@ -206,9 +209,10 @@
     lock_name: str
     lock_key: str
     write: bool | None
+    start_ts_ms: int = 0
     deferred: "defer.Deferred[None]" = attr.Factory(defer.Deferred)
     _inner_lock: Lock | None = None
-    _retry_interval: float = 0.1
+    _timeout_interval: float = 0.1
     _lock_span: "opentracing.Scope" = attr.Factory(
         lambda: start_active_span("WaitingLock.lock")
     )
@@ -220,6 +224,7 @@
                 self.deferred.callback(None)
 
     async def __aenter__(self) -> None:
+        self.start_ts_ms = self.clock.time_msec()
         self._lock_span.__enter__()
 
         with start_active_span("WaitingLock.waiting_for_lock"):
@@ -240,19 +245,44 @@
                     break
 
                 try:
-                    # Wait until the we get notified the lock might have been
+                    # Wait until the notification that the lock might have been
                     # released (by the deferred being resolved). We also
-                    # periodically wake up in case the lock was released but we
+                    # periodically wake up in case the lock was released, but 
we
                     # weren't notified.
                     with PreserveLoggingContext():
-                        timeout = self._get_next_retry_interval()
                         await timeout_deferred(
                             deferred=self.deferred,
-                            timeout=timeout,
+                            timeout=self._timeout_interval,
                             clock=self.clock,
                         )
-                except Exception:
-                    pass
+                except defer.TimeoutError:
+                    # Only increment the timeout value if this was an actual 
timeout
+                    # (defer.TimeoutError)
+                    self._increment_timeout_interval()
+
+                    now_ms = self.clock.time_msec()
+                    time_spent_trying_to_lock = Duration(
+                        milliseconds=now_ms - self.start_ts_ms
+                    )
+                    if (
+                        time_spent_trying_to_lock.as_millis()
+                        > 
WORKER_LOCK_EXCESSIVE_WAITING_WARN_DURATION.as_millis()
+                    ):
+                        logger.warning(
+                            "(WaitingLock (%s, %s)) Time spent waiting to 
acquire lock "
+                            "is getting excessive: %ss. There may be a 
deadlock.",
+                            self.lock_name,
+                            self.lock_key,
+                            time_spent_trying_to_lock.as_secs(),
+                        )
+
+                except Exception as e:
+                    logger.warning(
+                        "Caught an exception while waiting on 
WaitingLock(lock_name=%s, lock_key=%s): %r",
+                        self.lock_name,
+                        self.lock_key,
+                        e,
+                    )
 
         return await self._inner_lock.__aenter__()
 
@@ -273,15 +303,14 @@
 
         return r
 
-    def _get_next_retry_interval(self) -> float:
-        next = self._retry_interval
-        self._retry_interval = max(5, next * 2)
-        if self._retry_interval > Duration(minutes=10).as_secs():  # >7 
iterations
-            logger.warning(
-                "Lock timeout is getting excessive: %ss. There may be a 
deadlock.",
-                self._retry_interval,
-            )
-        return next * random.uniform(0.9, 1.1)
+    def _increment_timeout_interval(self) -> float:
+        next_interval = self._timeout_interval
+        next_interval = min(WORKER_LOCK_MAX_RETRY_INTERVAL.as_secs(), 
next_interval * 2)
+
+        # The jitter value is maintained for the timeout, to help avoid a 
"thundering
+        # herd" situation when all locks may time out at the same time.
+        self._timeout_interval = next_interval * random.uniform(0.9, 1.1)
+        return self._timeout_interval
 
 
 @attr.s(auto_attribs=True, eq=False)
@@ -294,10 +323,11 @@
     store: LockStore
     handler: WorkerLocksHandler
 
+    start_ts_ms: int = 0
     deferred: "defer.Deferred[None]" = attr.Factory(defer.Deferred)
 
     _inner_lock_cm: AsyncContextManager | None = None
-    _retry_interval: float = 0.1
+    _timeout_interval: float = 0.1
     _lock_span: "opentracing.Scope" = attr.Factory(
         lambda: start_active_span("WaitingLock.lock")
     )
@@ -309,6 +339,7 @@
                 self.deferred.callback(None)
 
     async def __aenter__(self) -> None:
+        self.start_ts_ms = self.clock.time_msec()
         self._lock_span.__enter__()
 
         with start_active_span("WaitingLock.waiting_for_lock"):
@@ -324,19 +355,42 @@
                     break
 
                 try:
-                    # Wait until the we get notified the lock might have been
+                    # Wait until the notification that the lock might have been
                     # released (by the deferred being resolved). We also
-                    # periodically wake up in case the lock was released but we
+                    # periodically wake up in case the lock was released, but 
we
                     # weren't notified.
                     with PreserveLoggingContext():
-                        timeout = self._get_next_retry_interval()
                         await timeout_deferred(
                             deferred=self.deferred,
-                            timeout=timeout,
+                            timeout=self._timeout_interval,
                             clock=self.clock,
                         )
-                except Exception:
-                    pass
+                except defer.TimeoutError:
+                    # Only increment the timeout value if this was an actual 
timeout
+                    # (defer.TimeoutError)
+                    self._increment_timeout_interval()
+
+                    now_ms = self.clock.time_msec()
+                    time_spent_trying_to_lock = Duration(
+                        milliseconds=now_ms - self.start_ts_ms
+                    )
+                    if (
+                        time_spent_trying_to_lock.as_millis()
+                        > 
WORKER_LOCK_EXCESSIVE_WAITING_WARN_DURATION.as_millis()
+                    ):
+                        logger.warning(
+                            "(WaitingMultiLock (%r)) Time spent waiting to 
acquire lock "
+                            "is getting excessive: %ss. There may be a 
deadlock.",
+                            self.lock_names,
+                            time_spent_trying_to_lock.as_secs(),
+                        )
+
+                except Exception as e:
+                    logger.warning(
+                        "Caught an exception while waiting on 
WaitingMultiLock(lock_names=%r): %r",
+                        self.lock_names,
+                        e,
+                    )
 
         assert self._inner_lock_cm
         await self._inner_lock_cm.__aenter__()
@@ -360,12 +414,11 @@
 
         return r
 
-    def _get_next_retry_interval(self) -> float:
-        next = self._retry_interval
-        self._retry_interval = max(5, next * 2)
-        if self._retry_interval > Duration(minutes=10).as_secs():  # >7 
iterations
-            logger.warning(
-                "Lock timeout is getting excessive: %ss. There may be a 
deadlock.",
-                self._retry_interval,
-            )
-        return next * random.uniform(0.9, 1.1)
+    def _increment_timeout_interval(self) -> float:
+        next_interval = self._timeout_interval
+        next_interval = min(WORKER_LOCK_MAX_RETRY_INTERVAL.as_secs(), 
next_interval * 2)
+
+        # The jitter value is maintained for the timeout, to help avoid a 
"thundering
+        # herd" situation when all locks may time out at the same time.
+        self._timeout_interval = next_interval * random.uniform(0.9, 1.1)
+        return self._timeout_interval
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/matrix-synapse-1.152.0/synapse/storage/databases/main/stream.py 
new/matrix-synapse-1.152.1/synapse/storage/databases/main/stream.py
--- old/matrix-synapse-1.152.0/synapse/storage/databases/main/stream.py 
2026-04-28 14:45:53.000000000 +0200
+++ new/matrix-synapse-1.152.1/synapse/storage/databases/main/stream.py 
2026-05-07 14:49:49.000000000 +0200
@@ -2425,12 +2425,19 @@
             event_filter: If provided filters the events to those that match 
the filter.
 
         Returns:
-            The results as a list of events, a token that points to the end of
-            the result set, and a boolean to indicate if there were more events
-            but we hit the limit. If no events are returned then the end of the
+            - The results as a list of events;
+            - a token that points to the end of the result set; and
+            - a boolean to indicate if there were more events
+              but we hit the limit (`limited`)
+
+            If no events are returned and `limited` is false, then the end of the
             stream has been reached (i.e. there are no events between `from_key`
             and `to_key`).
 
+            When `limited` is true, that means that more pagination can be attempted.
+            Note that `limited` can be true even if no events are returned,
+            because rejected events are filtered out after the limit check.
+
             When Direction.FORWARDS: from_key < x <= to_key, (ascending order)
             When Direction.BACKWARDS: from_key >= x > to_key, (descending order)
         """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/matrix-synapse-1.152.0/tests/handlers/test_worker_lock.py 
new/matrix-synapse-1.152.1/tests/handlers/test_worker_lock.py
--- old/matrix-synapse-1.152.0/tests/handlers/test_worker_lock.py       
2026-04-28 14:45:53.000000000 +0200
+++ new/matrix-synapse-1.152.1/tests/handlers/test_worker_lock.py       
2026-05-07 14:49:49.000000000 +0200
@@ -26,7 +26,9 @@
 from twisted.internet.testing import MemoryReactor
 
 from synapse.server import HomeServer
+from synapse.storage.databases.main.lock import _RENEWAL_INTERVAL
 from synapse.util.clock import Clock
+from synapse.util.duration import Duration
 
 from tests import unittest
 from tests.replication._base import BaseMultiWorkerStreamTestCase
@@ -40,6 +42,7 @@
         self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
     ) -> None:
         self.worker_lock_handler = self.hs.get_worker_locks_handler()
+        self.store = self.hs.get_datastores().main
 
     def test_wait_for_lock_locally(self) -> None:
         """Test waiting for a lock on a single worker"""
@@ -56,6 +59,66 @@
         self.get_success(d2)
         self.get_success(lock2.__aexit__(None, None, None))
 
+    def test_timeouts_for_lock_locally(self) -> None:
+        """
+        Test that we regularly retry to reacquire locks.
+
+        This is a regression test to make sure the lock retry time doesn't 
balloon to a value
+        so large it can't even be printed reliably anymore.
+        """
+
+        # Create and acquire the first lock
+        lock1 = self.worker_lock_handler.acquire_lock("name", "key")
+        self.get_success(lock1.__aenter__())
+
+        # Create and try to acquire the second lock
+        lock2 = self.worker_lock_handler.acquire_lock("name", "key")
+        d2 = defer.ensureDeferred(lock2.__aenter__())
+        # Make sure we haven't acquired the lock yet (`lock1` still holds it)
+        self.assertNoResult(d2)
+
+        # Advance time by an hour (some duration that would previously cause 
our timeout
+        # to balloon if it weren't constrained). Max back-off (saturate)
+        #
+        # Note: We use `_pump_by` instead of `pump`/`advance` as the `Lock` 
has an
+        # internal background looping call that runs every 30 seconds
+        # (`_RENEWAL_INTERVAL`) to renew the `Lock` and push it's "drop 
timeout" value
+        # further out by 2 minutes (`_LOCK_TIMEOUT_MS`). The `Lock` will 
prematurely
+        # drop if this renewal is not allowed to run, which sours the test.
+        # self.pump(amount=Duration(hours=1))
+        self._pump_by(amount=Duration(hours=1), by=_RENEWAL_INTERVAL)
+
+        # Make sure we haven't acquired the `lock2` yet (`lock1` still holds 
it)
+        self.assertNoResult(d2)
+
+        # Release the first lock (`lock1`). The second lock(`lock2`) should be
+        # automatically acquired by the `pump()` inside `get_success()`
+        self.get_success(lock1.__aexit__(None, None, None))
+
+        # We should now have the lock
+        self.successResultOf(d2)
+
+    def _pump_by(
+        self,
+        *,
+        amount: Duration = Duration(seconds=0),
+        by: Duration = Duration(seconds=0.1),
+    ) -> None:
+        """
+        Like `self.pump()` but you can specify the time increment to advance 
with until
+        you reach the time amount.
+
+        Unlike `self.pump()`, this doesn't multiply the time at all.
+
+        Args:
+            amount: The amount of time to advance
+            by: The time increment in seconds to advance time by until we 
reach the `amount`
+        """
+        end_time_s = self.reactor.seconds() + amount.as_secs()
+
+        while self.reactor.seconds() < end_time_s:
+            self.reactor.advance(by.as_secs())
+
     def test_lock_contention(self) -> None:
         """Test lock contention when a lot of locks wait on a single worker"""
         nb_locks_to_test = 500
@@ -124,3 +187,70 @@
 
         self.get_success(d2)
         self.get_success(lock2.__aexit__(None, None, None))
+
+    def test_timeouts_for_lock_worker(self) -> None:
+        """
+        Test that we regularly retry to reacquire locks.
+
+        This is a regression test to make sure the lock retry time doesn't 
balloon to a value
+        so large it can't even be printed reliably anymore.
+        """
+        worker = self.make_worker_hs(
+            "synapse.app.generic_worker",
+            extra_config={
+                "redis": {"enabled": True},
+            },
+        )
+        worker_lock_handler = worker.get_worker_locks_handler()
+
+        # Create and acquire the first lock on the main process
+        lock1 = self.main_worker_lock_handler.acquire_lock("name", "key")
+        self.get_success(lock1.__aenter__())
+
+        # Create and try to acquire the second lock on the worker
+        lock2 = worker_lock_handler.acquire_lock("name", "key")
+        d2 = defer.ensureDeferred(lock2.__aenter__())
+        # Make sure we haven't acquired the lock yet (`lock1` still holds it)
+        self.assertNoResult(d2)
+
+        # Advance time by an hour (some duration that would previously cause 
our timeout
+        # to balloon if it weren't constrained). Max back-off (saturate)
+        #
+        # Note: We use `_pump_by` instead of `pump`/`advance` as the `Lock` 
has an
+        # internal background looping call that runs every 30 seconds
+        # (`_RENEWAL_INTERVAL`) to renew the `Lock` and push it's "drop 
timeout" value
+        # further out by 2 minutes (`_LOCK_TIMEOUT_MS`). The `Lock` will 
prematurely
+        # drop if this renewal is not allowed to run, which sours the test.
+        # self.pump(amount=Duration(hours=1))
+        self._pump_by(amount=Duration(hours=1), by=_RENEWAL_INTERVAL)
+
+        # Make sure we haven't acquired the `lock2` yet (`lock1` still holds 
it)
+        self.assertNoResult(d2)
+
+        # Release the first lock (`lock1`). The second lock(`lock2`) should be
+        # automatically acquired by the `pump()` inside `get_success()`
+        self.get_success(lock1.__aexit__(None, None, None))
+
+        # We should now have the lock
+        self.successResultOf(d2)
+
+    def _pump_by(
+        self,
+        *,
+        amount: Duration = Duration(seconds=0),
+        by: Duration = Duration(seconds=0.1),
+    ) -> None:
+        """
+        Like `self.pump()` but you can specify the time increment to advance 
with until
+        you reach the time amount.
+
+        Unlike `self.pump()`, this doesn't multiply the time at all.
+
+        Args:
+            amount: The amount of time to advance
+            by: The time increment in seconds to advance time by until we 
reach the `amount`
+        """
+        end_time_s = self.reactor.seconds() + amount.as_secs()
+
+        while self.reactor.seconds() < end_time_s:
+            self.reactor.advance(by.as_secs())
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/matrix-synapse-1.152.0/tests/rest/client/test_rooms.py 
new/matrix-synapse-1.152.1/tests/rest/client/test_rooms.py
--- old/matrix-synapse-1.152.0/tests/rest/client/test_rooms.py  2026-04-28 
14:45:53.000000000 +0200
+++ new/matrix-synapse-1.152.1/tests/rest/client/test_rooms.py  2026-05-07 
14:49:49.000000000 +0200
@@ -66,7 +66,10 @@
 from tests import unittest
 from tests.http.server._base import make_request_with_cancellation_test
 from tests.storage.test_stream import PaginationTestCase
-from tests.test_utils.event_injection import create_event
+from tests.test_utils.event_injection import (
+    create_event,
+    inject_event,
+)
 from tests.unittest import override_config
 from tests.utils import default_config
 
@@ -2371,6 +2374,87 @@
             channel.json_body["errcode"], Codes.NOT_JSON, channel.json_body
         )
 
+    def test_room_messages_paginate_through_rejected_events(
+        self,
+    ) -> None:
+        """Test that pagination continues past a batch of rejected events.
+
+        Regression test for 
https://github.com/element-hq/synapse/security/advisories/GHSA-6qf2-7x63-mm6v
+
+        Synapse before 1.152.1 had a bug meaning that a batch full of only
+        rejected events would cause `/messages` to not return any more
+        pagination tokens, falsely signalling the end of backpagination.
+        """
+        # Send an early message that should not be filtered.
+        early_event_id = self.helper.send(self.room_id, "early 
message")["event_id"]
+
+        # Inject a batch of events and mark them as rejected in the database.
+        # We create more events than a single pagination request would fetch,
+        # so that one page of backward pagination request would only see 
rejected events.
+        for _ in range(3):
+            event = self.get_success(
+                inject_event(
+                    self.hs,
+                    room_id=self.room_id,
+                    sender=self.user_id,
+                    type=EventTypes.Message,
+                    content={"body": "filtered event", "msgtype": "m.text"},
+                )
+            )
+            self.get_success(
+                self.hs.get_datastores().main.db_pool.runInteraction(
+                    "mark_rejected",
+                    self.hs.get_datastores().main.mark_event_rejected_txn,
+                    event.event_id,
+                    "testing",
+                )
+            )
+
+        # Send a message after all the rejected events.
+        latest_event_id = self.helper.send(self.room_id, "latest 
message")["event_id"]
+
+        # Start backpaginating.
+        channel = self.make_request(
+            "GET", f"/rooms/{self.room_id}/messages?dir=b&limit=2"
+        )
+        self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+
+        events_in_page = [e["event_id"] for e in channel.json_body["chunk"]]
+        end_token: str | None = channel.json_body["end"]
+
+        self.assertEqual(
+            events_in_page,
+            [latest_event_id],
+            "The latest event should be included in the first page we see 
whilst backpaginating",
+        )
+
+        event_ids_in_pages: list[list[str]] = [events_in_page]
+
+        # Bound the number of backpagination attempts to 2
+        for _ in range(2):
+            channel = self.make_request(
+                "GET", 
f"/rooms/{self.room_id}/messages?from={end_token}&dir=b&limit=2"
+            )
+            self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+            events_in_page = [e["event_id"] for e in 
channel.json_body["chunk"]]
+            event_ids_in_pages.append(events_in_page)
+
+            if early_event_id in events_in_page:
+                # We have found the event we were looking for
+                return
+
+            self.assertIn(
+                "end",
+                channel.json_body,
+                f"No `end` token received. Did not find {early_event_id} 
whilst backpaginating ({latest_event_id = }, {event_ids_in_pages = })",
+            )
+            # Use the end_token in the next iteration
+            end_token = channel.json_body["end"]
+
+        self.fail(
+            f"Exhausted backpagination attempts. Did not find {early_event_id} 
whilst backpaginating ({latest_event_id = }, {event_ids_in_pages = })"
+        )
+
 
 class RoomMessageFilterTestCase(RoomBase):
     """Tests /rooms/$room_id/messages REST events."""

++++++ matrix-synapse.obsinfo ++++++
--- /var/tmp/diff_new_pack.pTffUE/_old  2026-05-08 16:44:08.370098032 +0200
+++ /var/tmp/diff_new_pack.pTffUE/_new  2026-05-08 16:44:08.374098198 +0200
@@ -1,5 +1,5 @@
 name: matrix-synapse
-version: 1.152.0
-mtime: 1777380353
-commit: 16863c87d56508ed163b3f6da9ba463e902b7e63
+version: 1.152.1
+mtime: 1778158189
+commit: d97b5b9e21681792d78c5d58243fc483c664b66e
 

++++++ vendor.tar.zst ++++++
/work/SRC/openSUSE:Factory/matrix-synapse/vendor.tar.zst /work/SRC/openSUSE:Factory/.matrix-synapse.new.1966/vendor.tar.zst differ: char 7, line 1
