qemu_co_queue_next(&queue) arranges that the next queued coroutine is run at a later point in time. This deferred restart is useful because the caller may not want to transfer control yet.
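To make the semantics concrete, here is a minimal usage sketch (not part of this
patch).  It assumes only the existing CoQueue API declared in block/coroutine.h
and builds only inside the QEMU tree; the waiter/waker coroutines and the shared
CoQueue are hypothetical names used for illustration:

#include "block/coroutine.h"

static CoQueue queue;             /* set up elsewhere with qemu_co_queue_init() */

/* Parks itself until another coroutine restarts it. */
static void coroutine_fn waiter(void *opaque)
{
    qemu_co_queue_wait(&queue);   /* yields here */
    /* ...resumed only after the waker has yielded or terminated... */
}

/* Marks the next waiter runnable without handing over control yet. */
static void coroutine_fn waker(void *opaque)
{
    qemu_co_queue_next(&queue);   /* the waiter is NOT entered here */
    /* ...safe to keep touching shared state... */
    qemu_coroutine_yield();       /* the waiter is entered once we yield */
}

The point of the deferred restart is the window between qemu_co_queue_next() and
the yield: the waker can finish its critical section without the waiter running
in between.  A second sketch, walking through the wakeup-queue mechanism this
patch introduces, follows the patch.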
This behavior was implemented using QEMUBH in the past, which meant that
CoQueue (and hence CoMutex and CoRwlock) had a dependency on the AioContext
event loop.  This hidden dependency causes trouble when we move to a world
with multiple event loops - now qemu_co_queue_next() needs to know which
event loop to schedule the QEMUBH in.

After pondering how to stash AioContext I realized the best solution is to
not use AioContext at all.  This patch implements the deferred restart
behavior purely in terms of coroutines and no longer uses QEMUBH.

Here is how it works:

Each Coroutine has a wakeup queue that starts out empty.  When
qemu_co_queue_next() is called, the next coroutine is added to our wakeup
queue.  The wakeup queue is processed when we yield or terminate.

Signed-off-by: Stefan Hajnoczi <stefa...@redhat.com>
---
 include/block/coroutine_int.h |  4 ++++
 qemu-coroutine-lock.c         | 56 ++++++++++++++++---------------------------
 qemu-coroutine.c              |  3 +++
 trace-events                  |  2 +-
 4 files changed, 29 insertions(+), 36 deletions(-)

diff --git a/include/block/coroutine_int.h b/include/block/coroutine_int.h
index 17eb71e..f133d65 100644
--- a/include/block/coroutine_int.h
+++ b/include/block/coroutine_int.h
@@ -38,6 +38,9 @@ struct Coroutine {
     void *entry_arg;
     Coroutine *caller;
     QSLIST_ENTRY(Coroutine) pool_next;
+
+    /* Coroutines that should be woken up when we yield or terminate */
+    QTAILQ_HEAD(, Coroutine) co_queue_wakeup;
     QTAILQ_ENTRY(Coroutine) co_queue_next;
 };
 
@@ -45,5 +48,6 @@ Coroutine *qemu_coroutine_new(void);
 void qemu_coroutine_delete(Coroutine *co);
 CoroutineAction qemu_coroutine_switch(Coroutine *from, Coroutine *to,
                                       CoroutineAction action);
+void coroutine_fn qemu_co_queue_run_restart(Coroutine *co);
 
 #endif
diff --git a/qemu-coroutine-lock.c b/qemu-coroutine-lock.c
index 86efe1f..d9fea49 100644
--- a/qemu-coroutine-lock.c
+++ b/qemu-coroutine-lock.c
@@ -26,39 +26,11 @@
 #include "block/coroutine.h"
 #include "block/coroutine_int.h"
 #include "qemu/queue.h"
-#include "block/aio.h"
 #include "trace.h"
 
-/* Coroutines are awoken from a BH to allow the current coroutine to complete
- * its flow of execution.  The BH may run after the CoQueue has been destroyed,
- * so keep BH data in a separate heap-allocated struct.
- */
-typedef struct {
-    QEMUBH *bh;
-    QTAILQ_HEAD(, Coroutine) entries;
-} CoQueueNextData;
-
-static void qemu_co_queue_next_bh(void *opaque)
-{
-    CoQueueNextData *data = opaque;
-    Coroutine *next;
-
-    trace_qemu_co_queue_next_bh();
-    while ((next = QTAILQ_FIRST(&data->entries))) {
-        QTAILQ_REMOVE(&data->entries, next, co_queue_next);
-        qemu_coroutine_enter(next, NULL);
-    }
-
-    qemu_bh_delete(data->bh);
-    g_slice_free(CoQueueNextData, data);
-}
-
 void qemu_co_queue_init(CoQueue *queue)
 {
     QTAILQ_INIT(&queue->entries);
-
-    /* This will be exposed to callers once there are multiple AioContexts */
-    queue->ctx = qemu_get_aio_context();
 }
 
 void coroutine_fn qemu_co_queue_wait(CoQueue *queue)
@@ -77,23 +49,37 @@ void coroutine_fn qemu_co_queue_wait_insert_head(CoQueue *queue)
     assert(qemu_in_coroutine());
 }
 
+/**
+ * qemu_co_queue_run_restart:
+ *
+ * Enter each coroutine that was previously marked for restart by
+ * qemu_co_queue_next() or qemu_co_queue_restart_all().  This function is
+ * invoked by the core coroutine code when the current coroutine yields or
+ * terminates.
+ */
+void qemu_co_queue_run_restart(Coroutine *co)
+{
+    Coroutine *next;
+
+    trace_qemu_co_queue_run_restart(co);
+    while ((next = QTAILQ_FIRST(&co->co_queue_wakeup))) {
+        QTAILQ_REMOVE(&co->co_queue_wakeup, next, co_queue_next);
+        qemu_coroutine_enter(next, NULL);
+    }
+}
+
 static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
 {
+    Coroutine *self = qemu_coroutine_self();
     Coroutine *next;
-    CoQueueNextData *data;
 
     if (QTAILQ_EMPTY(&queue->entries)) {
         return false;
     }
 
-    data = g_slice_new(CoQueueNextData);
-    data->bh = aio_bh_new(queue->ctx, qemu_co_queue_next_bh, data);
-    QTAILQ_INIT(&data->entries);
-    qemu_bh_schedule(data->bh);
-
     while ((next = QTAILQ_FIRST(&queue->entries)) != NULL) {
         QTAILQ_REMOVE(&queue->entries, next, co_queue_next);
-        QTAILQ_INSERT_TAIL(&data->entries, next, co_queue_next);
+        QTAILQ_INSERT_TAIL(&self->co_queue_wakeup, next, co_queue_next);
         trace_qemu_co_queue_next(next);
         if (single) {
             break;
diff --git a/qemu-coroutine.c b/qemu-coroutine.c
index 60ac79e..423430d 100644
--- a/qemu-coroutine.c
+++ b/qemu-coroutine.c
@@ -45,6 +45,7 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry)
     }
 
     co->entry = entry;
+    QTAILQ_INIT(&co->co_queue_wakeup);
     return co;
 }
 
@@ -87,6 +88,8 @@ static void coroutine_swap(Coroutine *from, Coroutine *to)
 
     ret = qemu_coroutine_switch(from, to, COROUTINE_YIELD);
 
+    qemu_co_queue_run_restart(to);
+
     switch (ret) {
     case COROUTINE_YIELD:
         return;
diff --git a/trace-events b/trace-events
index 9c73931..f51408a 100644
--- a/trace-events
+++ b/trace-events
@@ -825,7 +825,7 @@ qemu_coroutine_yield(void *from, void *to) "from %p to %p"
 qemu_coroutine_terminate(void *co) "self %p"
 
 # qemu-coroutine-lock.c
-qemu_co_queue_next_bh(void) ""
+qemu_co_queue_run_restart(void *co) "co %p"
 qemu_co_queue_next(void *nxt) "next %p"
 qemu_co_mutex_lock_entry(void *mutex, void *self) "mutex %p self %p"
 qemu_co_mutex_lock_return(void *mutex, void *self) "mutex %p self %p"
-- 
1.8.1.4
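
For illustration only (not part of the patch): a hypothetical mini-test, in the
spirit of tests/test-coroutine.c, that exercises the new deferred-restart path.
It assumes this patch is applied and builds only inside the QEMU tree; the
function and variable names below are made up:

#include <assert.h>
#include "block/coroutine.h"

static CoQueue queue;
static int step;

static void coroutine_fn waiter(void *opaque)
{
    qemu_co_queue_wait(&queue);         /* park on queue->entries and yield */
    step = 2;                           /* runs only after the waker is done */
}

static void coroutine_fn waker(void *opaque)
{
    qemu_co_queue_next(&queue);         /* waiter moved to our co_queue_wakeup */
    step = 1;                           /* we still run to completion first */
}                                       /* termination drains co_queue_wakeup */

static void test_deferred_restart(void)
{
    qemu_co_queue_init(&queue);
    step = 0;

    qemu_coroutine_enter(qemu_coroutine_create(waiter), NULL);
    assert(step == 0);                  /* waiter is parked, nothing ran yet */

    qemu_coroutine_enter(qemu_coroutine_create(waker), NULL);
    assert(step == 2);                  /* waker finished, then the waiter was
                                           entered by qemu_co_queue_run_restart() */
}

The enter of the waker returns only after coroutine_swap() has called
qemu_co_queue_run_restart() and drained the waker's wakeup queue, so the
waiter's restart happens on the caller's stack rather than from a bottom half,
which is exactly what removes the AioContext dependency.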