AioHandler already has a GPollFD, so we can use its events/revents
directly.

Add an int poller_idx field to AioHandler so we can map g_poll(3)
results back to AioHandlers.

Reuse aio_dispatch() to invoke handlers after g_poll(3).

Signed-off-by: Stefan Hajnoczi <stefa...@redhat.com>
---
 aio-posix.c         | 62 ++++++++++++++++-------------------------------------
 async.c             |  2 ++
 include/block/aio.h |  4 ++++
 3 files changed, 25 insertions(+), 43 deletions(-)

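Note for reviewers: the Poller struct and the poller_* helpers used below
come from qemu/poller.h, which is not part of this patch (presumably
introduced elsewhere in this series).  Purely as a sketch, inferred from
the call sites in this patch, the interface could look roughly like the
following.  The poller_* names and the poll_fds/nfds members are taken
from the diff; the nalloc field and the growth strategy are assumptions.

/* Sketch only -- not the actual qemu/poller.h */
#include <glib.h>

typedef struct {
    GPollFD *poll_fds;   /* array handed straight to g_poll(3) */
    unsigned nfds;       /* number of valid entries in poll_fds */
    unsigned nalloc;     /* allocated length of poll_fds (assumed) */
} Poller;

static inline void poller_init(Poller *p)
{
    p->poll_fds = NULL;
    p->nfds = 0;
    p->nalloc = 0;
}

static inline void poller_cleanup(Poller *p)
{
    g_free(p->poll_fds);
    poller_init(p);
}

/* Drop all fds from the previous iteration but keep the allocation. */
static inline void poller_reset(Poller *p)
{
    p->nfds = 0;
}

/* Append an fd and return its index so revents can be fetched later. */
static inline int poller_add_fd(Poller *p, int fd, int events)
{
    if (p->nfds == p->nalloc) {
        p->nalloc = p->nalloc ? p->nalloc * 2 : 4;
        p->poll_fds = g_renew(GPollFD, p->poll_fds, p->nalloc);
    }
    p->poll_fds[p->nfds] = (GPollFD){ .fd = fd, .events = events };
    return p->nfds++;
}

/* Fetch the revents that g_poll(3) filled in for a given index. */
static inline int poller_get_revents(Poller *p, int idx)
{
    return p->poll_fds[idx].revents;
}

With an interface along these lines, aio_poll() only needs poller_reset()
plus one poller_add_fd() per active handler, and after g_poll(3) the
revents are copied back via poller_get_revents() into node->pfd.revents
so that aio_dispatch() can invoke the callbacks.
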
diff --git a/aio-posix.c b/aio-posix.c
index 35131a3..303e19f 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -25,6 +25,7 @@ struct AioHandler
     IOHandler *io_write;
     AioFlushHandler *io_flush;
     int deleted;
+    int poller_idx;
     void *opaque;
     QLIST_ENTRY(AioHandler) node;
 };
@@ -85,6 +86,7 @@ void aio_set_fd_handler(AioContext *ctx,
         node->io_write = io_write;
         node->io_flush = io_flush;
         node->opaque = opaque;
+        node->poller_idx = -1;
 
         node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP : 0);
         node->pfd.events |= (io_write ? G_IO_OUT : 0);
@@ -177,10 +179,7 @@ static bool aio_dispatch(AioContext *ctx)
 
 bool aio_poll(AioContext *ctx, bool blocking)
 {
-    static struct timeval tv0;
     AioHandler *node;
-    fd_set rdfds, wrfds;
-    int max_fd = -1;
     int ret;
     bool busy, progress;
 
@@ -206,12 +205,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     ctx->walking_handlers++;
 
-    FD_ZERO(&rdfds);
-    FD_ZERO(&wrfds);
+    poller_reset(&ctx->poller);
 
-    /* fill fd sets */
+    /* fill poller */
     busy = false;
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+        node->poller_idx = -1;
+
         /* If there aren't pending AIO operations, don't invoke callbacks.
          * Otherwise, if there are no AIO requests, qemu_aio_wait() would
          * wait indefinitely.
@@ -222,13 +222,9 @@ bool aio_poll(AioContext *ctx, bool blocking)
             }
             busy = true;
         }
-        if (!node->deleted && node->io_read) {
-            FD_SET(node->pfd.fd, &rdfds);
-            max_fd = MAX(max_fd, node->pfd.fd + 1);
-        }
-        if (!node->deleted && node->io_write) {
-            FD_SET(node->pfd.fd, &wrfds);
-            max_fd = MAX(max_fd, node->pfd.fd + 1);
+        if (!node->deleted && node->pfd.events) {
+            node->poller_idx = poller_add_fd(&ctx->poller, node->pfd.fd,
+                                             node->pfd.events);
         }
     }
 
@@ -240,41 +236,21 @@ bool aio_poll(AioContext *ctx, bool blocking)
     }
 
     /* wait until next event */
-    ret = select(max_fd, &rdfds, &wrfds, NULL, blocking ? NULL : &tv0);
+    ret = g_poll(ctx->poller.poll_fds,
+                 ctx->poller.nfds,
+                 blocking ? -1 : 0);
 
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
-        /* we have to walk very carefully in case
-         * qemu_aio_set_fd_handler is called while we're walking */
-        node = QLIST_FIRST(&ctx->aio_handlers);
-        while (node) {
-            AioHandler *tmp;
-
-            ctx->walking_handlers++;
-
-            if (!node->deleted &&
-                FD_ISSET(node->pfd.fd, &rdfds) &&
-                node->io_read) {
-                node->io_read(node->opaque);
-                progress = true;
-            }
-            if (!node->deleted &&
-                FD_ISSET(node->pfd.fd, &wrfds) &&
-                node->io_write) {
-                node->io_write(node->opaque);
-                progress = true;
-            }
-
-            tmp = node;
-            node = QLIST_NEXT(node, node);
-
-            ctx->walking_handlers--;
-
-            if (!ctx->walking_handlers && tmp->deleted) {
-                QLIST_REMOVE(tmp, node);
-                g_free(tmp);
+        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+            if (node->poller_idx != -1) {
+                node->pfd.revents |=
+                    poller_get_revents(&ctx->poller, node->poller_idx);
             }
         }
+        if (aio_dispatch(ctx)) {
+            progress = true;
+        }
     }
 
     assert(progress || busy);
diff --git a/async.c b/async.c
index 72d268a..1cd1b89 100644
--- a/async.c
+++ b/async.c
@@ -174,6 +174,7 @@ aio_ctx_finalize(GSource     *source)
 
     aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
     event_notifier_cleanup(&ctx->notifier);
+    poller_cleanup(&ctx->poller);
 }
 
 static GSourceFuncs aio_source_funcs = {
@@ -198,6 +199,7 @@ AioContext *aio_context_new(void)
 {
     AioContext *ctx;
     ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
+    poller_init(&ctx->poller);
     event_notifier_init(&ctx->notifier, false);
     aio_set_event_notifier(ctx, &ctx->notifier, 
                            (EventNotifierHandler *)
diff --git a/include/block/aio.h b/include/block/aio.h
index 8eda924..45ff047 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -17,6 +17,7 @@
 #include "qemu-common.h"
 #include "qemu/queue.h"
 #include "qemu/event_notifier.h"
+#include "qemu/poller.h"
 
 typedef struct BlockDriverAIOCB BlockDriverAIOCB;
 typedef void BlockDriverCompletionFunc(void *opaque, int ret);
@@ -63,6 +64,9 @@ typedef struct AioContext {
 
     /* Used for aio_notify.  */
     EventNotifier notifier;
+
+    /* GPollFDs for aio_poll() */
+    Poller poller;
 } AioContext;
 
 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
-- 
1.8.1

