https://github.com/python/cpython/commit/b484c32d0ae386df8751aea86b3055813d8f805b
commit: b484c32d0ae386df8751aea86b3055813d8f805b
branch: main
author: Cody Maloney <[email protected]>
committer: kumaraditya303 <[email protected]>
date: 2025-11-24T21:06:53+05:30
summary:

gh-141863: use `bytearray.take_bytes` in asyncio streams for better performance 
(#141864)

files:
A Misc/NEWS.d/next/Library/2025-11-22-16-33-48.gh-issue-141863.4PLhnv.rst
M Lib/asyncio/streams.py

diff --git a/Lib/asyncio/streams.py b/Lib/asyncio/streams.py
index 59e22f523a85c5..d2db1a930c2ad2 100644
--- a/Lib/asyncio/streams.py
+++ b/Lib/asyncio/streams.py
@@ -667,8 +667,7 @@ async def readuntil(self, separator=b'\n'):
             # adds data which makes separator be found. That's why we check for
             # EOF *after* inspecting the buffer.
             if self._eof:
-                chunk = bytes(self._buffer)
-                self._buffer.clear()
+                chunk = self._buffer.take_bytes()
                 raise exceptions.IncompleteReadError(chunk, None)
 
             # _wait_for_data() will resume reading if stream was paused.
@@ -678,10 +677,9 @@ async def readuntil(self, separator=b'\n'):
             raise exceptions.LimitOverrunError(
                 'Separator is found, but chunk is longer than limit', 
match_start)
 
-        chunk = self._buffer[:match_end]
-        del self._buffer[:match_end]
+        chunk = self._buffer.take_bytes(match_end)
         self._maybe_resume_transport()
-        return bytes(chunk)
+        return chunk
 
     async def read(self, n=-1):
         """Read up to `n` bytes from the stream.
@@ -716,20 +714,16 @@ async def read(self, n=-1):
             # collect everything in self._buffer, but that would
             # deadlock if the subprocess sends more than self.limit
             # bytes.  So just call self.read(self._limit) until EOF.
-            blocks = []
-            while True:
-                block = await self.read(self._limit)
-                if not block:
-                    break
-                blocks.append(block)
-            return b''.join(blocks)
+            joined = bytearray()
+            while block := await self.read(self._limit):
+                joined += block
+            return joined.take_bytes()
 
         if not self._buffer and not self._eof:
             await self._wait_for_data('read')
 
         # This will work right even if buffer is less than n bytes
-        data = bytes(memoryview(self._buffer)[:n])
-        del self._buffer[:n]
+        data = self._buffer.take_bytes(min(len(self._buffer), n))
 
         self._maybe_resume_transport()
         return data
@@ -760,18 +754,12 @@ async def readexactly(self, n):
 
         while len(self._buffer) < n:
             if self._eof:
-                incomplete = bytes(self._buffer)
-                self._buffer.clear()
+                incomplete = self._buffer.take_bytes()
                 raise exceptions.IncompleteReadError(incomplete, n)
 
             await self._wait_for_data('readexactly')
 
-        if len(self._buffer) == n:
-            data = bytes(self._buffer)
-            self._buffer.clear()
-        else:
-            data = bytes(memoryview(self._buffer)[:n])
-            del self._buffer[:n]
+        data = self._buffer.take_bytes(n)
         self._maybe_resume_transport()
         return data
 
diff --git 
a/Misc/NEWS.d/next/Library/2025-11-22-16-33-48.gh-issue-141863.4PLhnv.rst 
b/Misc/NEWS.d/next/Library/2025-11-22-16-33-48.gh-issue-141863.4PLhnv.rst
new file mode 100644
index 00000000000000..910ab98c950f9e
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2025-11-22-16-33-48.gh-issue-141863.4PLhnv.rst
@@ -0,0 +1,2 @@
+Update :ref:`asyncio-streams` to use :meth:`bytearray.take_bytes` for an over
+10% performance improvement on the pyperformance asyncio_tcp benchmark.

_______________________________________________
Python-checkins mailing list -- [email protected]
To unsubscribe send an email to [email protected]
https://mail.python.org/mailman3//lists/python-checkins.python.org
Member address: [email protected]

Reply via email to