Introduce a BdrvGraphRWlock wrapper struct, so that graph-lock.c keeps the per-context reader bookkeeping private and struct AioContext only carries an opaque pointer to it. Signed-off-by: Emanuele Giuseppe Esposito <eespo...@redhat.com> --- block/graph-lock.c | 59 ++++++++++++++++++++++++++------------ include/block/aio.h | 12 ++++---- include/block/graph-lock.h | 1 + 3 files changed, 48 insertions(+), 24 deletions(-)
diff --git a/block/graph-lock.c b/block/graph-lock.c index b608a89d7c..c3c6eeedad 100644 --- a/block/graph-lock.c +++ b/block/graph-lock.c @@ -44,12 +44,23 @@ static uint32_t orphaned_reader_count; /* Queue of readers waiting for the writer to finish */ static CoQueue reader_queue; +struct BdrvGraphRWlock { + /* How many readers are currently reading the graph. */ + uint32_t reader_count; + + /* + * List of BdrvGraphRWlock kept in graph-lock.c + * Protected by aio_context_list_lock + */ + QTAILQ_ENTRY(BdrvGraphRWlock) next_aio; +}; + /* - * List of AioContext. This list ensures that each AioContext + * List of BdrvGraphRWlock. This list ensures that each BdrvGraphRWlock * can safely modify only its own counter, avoid reading/writing * others and thus improving performances by avoiding cacheline bounces. */ -static QTAILQ_HEAD(, AioContext) aio_context_list = +static QTAILQ_HEAD(, BdrvGraphRWlock) aio_context_list = QTAILQ_HEAD_INITIALIZER(aio_context_list); static void __attribute__((__constructor__)) bdrv_init_graph_lock(void) @@ -60,29 +71,31 @@ static void __attribute__((__constructor__)) bdrv_init_graph_lock(void) void register_aiocontext(AioContext *ctx) { + ctx->bdrv_graph = g_new0(BdrvGraphRWlock, 1); QEMU_LOCK_GUARD(&aio_context_list_lock); - assert(ctx->reader_count == 0); - QTAILQ_INSERT_TAIL(&aio_context_list, ctx, next_aio); + assert(ctx->bdrv_graph->reader_count == 0); + QTAILQ_INSERT_TAIL(&aio_context_list, ctx->bdrv_graph, next_aio); } void unregister_aiocontext(AioContext *ctx) { QEMU_LOCK_GUARD(&aio_context_list_lock); - orphaned_reader_count += ctx->reader_count; - QTAILQ_REMOVE(&aio_context_list, ctx, next_aio); + orphaned_reader_count += ctx->bdrv_graph->reader_count; + QTAILQ_REMOVE(&aio_context_list, ctx->bdrv_graph, next_aio); + g_free(ctx->bdrv_graph); } static uint32_t reader_count(void) { - AioContext *ctx; + BdrvGraphRWlock *bdrv_graph; uint32_t rd; QEMU_LOCK_GUARD(&aio_context_list_lock); /* rd can temporarly be negative, but the total 
will *always* be >= 0 */ rd = orphaned_reader_count; - QTAILQ_FOREACH(ctx, &aio_context_list, next_aio) { - rd += qatomic_read(&ctx->reader_count); + QTAILQ_FOREACH(bdrv_graph, &aio_context_list, next_aio) { + rd += qatomic_read(&bdrv_graph->reader_count); } /* shouldn't overflow unless there are 2^31 readers */ @@ -138,12 +151,17 @@ void bdrv_graph_wrunlock(void) void coroutine_fn bdrv_graph_co_rdlock(void) { - AioContext *aiocontext; - aiocontext = qemu_get_current_aio_context(); + BdrvGraphRWlock *bdrv_graph; + bdrv_graph = qemu_get_current_aio_context()->bdrv_graph; + + /* Do not lock if in main thread */ + if (qemu_in_main_thread()) { + return; + } for (;;) { - qatomic_set(&aiocontext->reader_count, - aiocontext->reader_count + 1); + qatomic_set(&bdrv_graph->reader_count, + bdrv_graph->reader_count + 1); /* make sure writer sees reader_count before we check has_writer */ smp_mb(); @@ -192,7 +210,7 @@ void coroutine_fn bdrv_graph_co_rdlock(void) } /* slow path where reader sleeps */ - aiocontext->reader_count--; + bdrv_graph->reader_count--; aio_wait_kick(); qemu_co_queue_wait(&reader_queue, &aio_context_list_lock); } @@ -201,11 +219,16 @@ void coroutine_fn bdrv_graph_co_rdlock(void) void coroutine_fn bdrv_graph_co_rdunlock(void) { - AioContext *aiocontext; - aiocontext = qemu_get_current_aio_context(); + BdrvGraphRWlock *bdrv_graph; + bdrv_graph = qemu_get_current_aio_context()->bdrv_graph; + + /* Do not lock if in main thread */ + if (qemu_in_main_thread()) { + return; + } - qatomic_store_release(&aiocontext->reader_count, - aiocontext->reader_count - 1); + qatomic_store_release(&bdrv_graph->reader_count, + bdrv_graph->reader_count - 1); /* make sure writer sees reader_count before we check has_writer */ smp_mb(); diff --git a/include/block/aio.h b/include/block/aio.h index 8e64f81d01..0f65a3cc9e 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -22,6 +22,7 @@ #include "qemu/event_notifier.h" #include "qemu/thread.h" #include "qemu/timer.h" 
+#include "block/graph-lock.h" typedef struct BlockAIOCB BlockAIOCB; typedef void BlockCompletionFunc(void *opaque, int ret); @@ -127,14 +128,13 @@ struct AioContext { /* Used by AioContext users to protect from multi-threaded access. */ QemuRecMutex lock; - /* How many readers in this AioContext are currently reading the graph. */ - uint32_t reader_count; - /* - * List of AioContext kept in graph-lock.c - * Protected by aio_context_list_lock + * Keep track of readers and writers of the block layer graph. + * This is essential to avoid performing additions and removal + * of nodes and edges from block graph while some + * other thread is traversing it. */ - QTAILQ_ENTRY(AioContext) next_aio; + BdrvGraphRWlock *bdrv_graph; /* The list of registered AIO handlers. Protected by ctx->list_lock. */ AioHandlerList aio_handlers; diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h index f975312bb6..fc806aefa3 100644 --- a/include/block/graph-lock.h +++ b/include/block/graph-lock.h @@ -53,6 +53,7 @@ * reader count. In that case we transfer the count to a global shared counter * so that the writer is always aware of all readers. */ +typedef struct BdrvGraphRWlock BdrvGraphRWlock; /* * register_aiocontext: -- 2.31.1