commit:     c06c7e50244292e263e5512f7baefc16bbe85456
Author:     Zac Medico <zmedico <AT> gentoo <DOT> org>
AuthorDate: Mon Apr 30 04:05:22 2018 +0000
Commit:     Zac Medico <zmedico <AT> gentoo <DOT> org>
CommitDate: Mon Apr 30 06:20:01 2018 +0000
URL:        https://gitweb.gentoo.org/proj/portage.git/commit/?id=c06c7e50

EbuildIpcDaemon: add_reader asyncio compat (bug 654382)

Use add_reader for asyncio compatibility.

Bug: https://bugs.gentoo.org/654382

 pym/_emerge/EbuildIpcDaemon.py | 28 +++++++---------------------
 pym/_emerge/FifoIpcDaemon.py   | 20 ++++++++------------
 2 files changed, 15 insertions(+), 33 deletions(-)

diff --git a/pym/_emerge/EbuildIpcDaemon.py b/pym/_emerge/EbuildIpcDaemon.py
index 8414d2020..c16049ee4 100644
--- a/pym/_emerge/EbuildIpcDaemon.py
+++ b/pym/_emerge/EbuildIpcDaemon.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2018 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import errno
@@ -32,24 +32,12 @@ class EbuildIpcDaemon(FifoIpcDaemon):
 
        __slots__ = ('commands',)
 
-       def _input_handler(self, fd, event):
+       def _input_handler(self):
                # Read the whole pickle in a single atomic read() call.
-               data = None
-               if event & self.scheduler.IO_IN:
-                       # For maximum portability, use os.read() here since
-                       # array.fromfile() and file.read() are both known to
-                       # erroneously return an empty string from this
-                       # non-blocking fifo stream on FreeBSD (bug #337465).
-                       try:
-                               data = os.read(fd, self._bufsize)
-                       except OSError as e:
-                               if e.errno != errno.EAGAIN:
-                                       raise
-                               # Assume that another event will be generated
-                               # if there's any relevant data.
-
-               if data:
-
+               data = self._read_buf(self._files.pipe_in, None)
+               if data is None:
+                       pass # EAGAIN
+               elif data:
                        try:
                                obj = pickle.loads(data)
                        except SystemExit:
@@ -85,7 +73,7 @@ class EbuildIpcDaemon(FifoIpcDaemon):
                                if reply_hook is not None:
                                        reply_hook()
 
-               elif event & self.scheduler.IO_HUP:
+               else: # EIO/POLLHUP
                        # This can be triggered due to a race condition which happens when
                        # the previous _reopen_input() call occurs before the writer has
                        # closed the pipe (see bug #401919). It's not safe to re-open
@@ -107,8 +95,6 @@ class EbuildIpcDaemon(FifoIpcDaemon):
                                finally:
                                        unlockfile(lock_obj)
 
-               return True
-
        def _send_reply(self, reply):
                # File streams are in unbuffered mode since we do atomic
                # read and write of whole pickles. Use non-blocking mode so

diff --git a/pym/_emerge/FifoIpcDaemon.py b/pym/_emerge/FifoIpcDaemon.py
index 3676e98da..0cbaa13c7 100644
--- a/pym/_emerge/FifoIpcDaemon.py
+++ b/pym/_emerge/FifoIpcDaemon.py
@@ -15,8 +15,7 @@ from portage.cache.mappings import slot_dict_class
 
 class FifoIpcDaemon(AbstractPollTask):
 
-       __slots__ = ("input_fifo", "output_fifo",) + \
-               ("_files", "_reg_id",)
+       __slots__ = ("input_fifo", "output_fifo", "_files")
 
        _file_names = ("pipe_in",)
        _files_dict = slot_dict_class(_file_names, prefix="")
@@ -40,9 +39,9 @@ class FifoIpcDaemon(AbstractPollTask):
                                        fcntl.fcntl(self._files.pipe_in,
                                                fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
 
-               self._reg_id = self.scheduler.io_add_watch(
+               self.scheduler.add_reader(
                        self._files.pipe_in,
-                       self._registered_events, self._input_handler)
+                       self._input_handler)
 
                self._registered = True
 
@@ -51,7 +50,7 @@ class FifoIpcDaemon(AbstractPollTask):
                Re-open the input stream, in order to suppress
                POLLHUP events (bug #339976).
                """
-               self.scheduler.source_remove(self._reg_id)
+               self.scheduler.remove_reader(self._files.pipe_in)
                os.close(self._files.pipe_in)
                self._files.pipe_in = \
                        os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
@@ -67,9 +66,9 @@ class FifoIpcDaemon(AbstractPollTask):
                                        fcntl.fcntl(self._files.pipe_in,
                                                fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
 
-               self._reg_id = self.scheduler.io_add_watch(
+               self.scheduler.add_reader(
                        self._files.pipe_in,
-                       self._registered_events, self._input_handler)
+                       self._input_handler)
 
        def isAlive(self):
                return self._registered
@@ -81,7 +80,7 @@ class FifoIpcDaemon(AbstractPollTask):
                # notify exit listeners
                self._async_wait()
 
-       def _input_handler(self, fd, event):
+       def _input_handler(self):
                raise NotImplementedError(self)
 
        def _unregister(self):
@@ -91,11 +90,8 @@ class FifoIpcDaemon(AbstractPollTask):
 
                self._registered = False
 
-               if self._reg_id is not None:
-                       self.scheduler.source_remove(self._reg_id)
-                       self._reg_id = None
-
                if self._files is not None:
                        for f in self._files.values():
+                               self.scheduler.remove_reader(f)
                                os.close(f)
                        self._files = None

Reply via email to