On Fri, 04/15 13:31, Paolo Bonzini wrote: > This provides the infrastructure to start a coroutine on a remote > AioContext. It will be used by CoMutex and CoQueue, so that > coroutines don't jump from one context to another when they > go to sleep on a mutex or waitqueue. > > aio_co_schedule is based on a lock-free multiple-producer, > single-consumer queue. The multiple producers use cmpxchg to add > to a LIFO stacks. The consumer (a per-AioContext bottom half) grabs
s/stacks/stack/ ? > all items added so far, inverts the list to make it FIFO, and goes > through it one item at a time. > > Most of the changes really are in infrastructure and tests. > > Signed-off-by: Paolo Bonzini <pbonz...@redhat.com> > --- > async.c | 38 ++++++++ > include/block/aio.h | 9 ++ > include/qemu/coroutine_int.h | 10 +- > tests/Makefile | 11 ++- > tests/iothread.c | 91 ++++++++++++++++++ > tests/iothread.h | 25 +++++ > tests/test-aio-multithread.c | 220 > +++++++++++++++++++++++++++++++++++++++++++ > trace-events | 4 + > 8 files changed, 404 insertions(+), 4 deletions(-) > create mode 100644 tests/iothread.c > create mode 100644 tests/iothread.h > create mode 100644 tests/test-aio-multithread.c > > diff --git a/async.c b/async.c > index acd3627..ef8b409 100644 > --- a/async.c > +++ b/async.c > @@ -30,6 +30,8 @@ > #include "qemu/main-loop.h" > #include "qemu/atomic.h" > #include "block/raw-aio.h" > +#include "trace/generated-tracers.h" > +#include "qemu/coroutine_int.h" > > /***********************************************************/ > /* bottom halves (can be seen as timers which expire ASAP) */ > @@ -255,6 +257,8 @@ aio_ctx_finalize(GSource *source) > } > #endif > > + qemu_bh_delete(ctx->schedule_bh); > + > qemu_lockcnt_lock(&ctx->list_lock); > assert(!qemu_lockcnt_count(&ctx->list_lock)); > while (ctx->first_bh) { > @@ -335,6 +339,28 @@ static void event_notifier_dummy_cb(EventNotifier *e) > { > } > > +static void schedule_bh_cb(void *opaque) > +{ > + AioContext *ctx = opaque; > + QSLIST_HEAD(, Coroutine) straight, reversed; > + > + QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines); > + QSLIST_INIT(&straight); > + > + while (!QSLIST_EMPTY(&reversed)) { > + Coroutine *co = QSLIST_FIRST(&reversed); > + QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next); > + QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next); > + } > + > + while (!QSLIST_EMPTY(&straight)) { > + Coroutine *co = QSLIST_FIRST(&straight); > + QSLIST_REMOVE_HEAD(&straight, 
co_scheduled_next); > + trace_aio_schedule_bh_cb(ctx, co); > + qemu_coroutine_enter(co, NULL); > + } > +} > + > AioContext *aio_context_new(Error **errp) > { > int ret; > diff --git a/include/block/aio.h b/include/block/aio.h > index 8f55d1a..0a344c3 100644 > --- a/include/block/aio.h > +++ b/include/block/aio.h > @@ -46,6 +46,7 @@ typedef struct AioHandler AioHandler; > typedef void QEMUBHFunc(void *opaque); > typedef void IOHandler(void *opaque); > > +struct Coroutine; > struct ThreadPool; > struct LinuxAioState; > > @@ -107,6 +108,9 @@ struct AioContext { > bool notified; > EventNotifier notifier; > > + QSLIST_HEAD(, Coroutine) scheduled_coroutines; > + QEMUBH *schedule_bh; Maybe rename it to co_schedule_bh to reflect that it's used for scheduling coroutines? > + > /* Thread pool for performing work and receiving completion callbacks. > * Has its own locking. > */ > +int main(int argc, char **argv) > +{ > + init_clocks(); > + > + g_test_init(&argc, &argv, NULL); > + g_test_add_func("/aio/multi/lifecycle", test_lifecycle); > + if (g_test_quick()) { > + g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1); > + } else { > + g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10); Should they use different path names, like /aio/multi/schedule/{1,10}? > + } > + return g_test_run(); > +} Fam