This primitive lets you lock and immediately unlock a CoMutex, guaranteeing neither blocking nor cacheline bouncing when no qemu_co_mutex_lock critical section is in progress.
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 include/qemu/coroutine.h   |  6 ++++++
 util/qemu-coroutine-lock.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index a4509bd..8d4416c 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -157,6 +157,12 @@ void qemu_co_mutex_init(CoMutex *mutex);
 void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex);
 
 /**
+ * Locks the mutex and immediately unlocks it. This is faster than back-to-back
+ * lock/unlock if the mutex is not taken by anyone.
+ */
+void coroutine_fn qemu_co_mutex_lock_unlock(CoMutex *mutex);
+
+/**
  * Unlocks the mutex and schedules the next coroutine that was waiting for this
  * lock to be run.
  */
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index 6328eed..86f56cd 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -287,6 +287,48 @@ retry_fast_path:
     self->locks_held++;
 }
 
+void coroutine_fn qemu_co_mutex_lock_unlock(CoMutex *mutex)
+{
+    AioContext *ctx = qemu_get_current_aio_context();
+    int waiters, i;
+
+retry_fast_path:
+    waiters = atomic_read(&mutex->locked);
+    if (waiters == 0) {
+        /* Provide same memory ordering semantics as mutex lock/unlock. */
+        smp_mb_acquire();
+        smp_mb_release();
+        return;
+    }
+
+    /* Spin briefly while there is exactly one holder.  NOTE(review): the
+     * ctx check presumably detects a holder in our own AioContext, which
+     * cannot make progress while we busy-wait -- confirm.
+     */
+    i = 0;
+    while (waiters == 1 && ++i < 1000) {
+        if (atomic_read(&mutex->ctx) == ctx) {
+            break;
+        }
+        waiters = atomic_read(&mutex->locked);
+        if (waiters == 0) {
+            smp_mb_acquire();
+            smp_mb_release();
+            return;
+        }
+        cpu_relax();
+    }
+
+    /* Register ourselves as a waiter and fall back to the slow path. */
+    if (atomic_cmpxchg(&mutex->locked, waiters, waiters + 1) != waiters) {
+        /* Contention changed under us; re-evaluate the fast path. */
+        goto retry_fast_path;
+    }
+
+    qemu_co_mutex_lock_slowpath(ctx, mutex);
+    qemu_co_mutex_unlock(mutex);
+}
+
 void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
 {
     Coroutine *self = qemu_coroutine_self();
-- 
2.9.3