From: Liu Ping Fan <pingf...@linux.vnet.ibm.com> After breaking down the big lock, a nested MMIO request that does not target RAM can cause a deadlock. Consider the scenario: devices dev_a and dev_b each hold fine-grained locks lockA and lockB; an ABBA deadlock can then be triggered. We fix this by tracking and rejecting such requests.
Signed-off-by: Liu Ping Fan <pingf...@linux.vnet.ibm.com> --- exec.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ qemu-thread.h | 7 +++++++ 2 files changed, 54 insertions(+), 0 deletions(-) diff --git a/exec.c b/exec.c index fa34ef9..1eb920d 100644 --- a/exec.c +++ b/exec.c @@ -3442,6 +3442,48 @@ static bool address_space_section_lookup_ref(AddressSpace *as, return safe_ref; } +typedef struct ThreadContext { + DispatchType dispatch_type; + unsigned int mmio_req_pending; +} ThreadContext; + +static __thread ThreadContext thread_context = { + .dispatch_type = DISPATCH_INIT, + .mmio_req_pending = 0 +}; + +void qemu_thread_set_dispatch_type(DispatchType type) +{ + thread_context.dispatch_type = type; +} + +void qemu_thread_reset_dispatch_type(void) +{ + thread_context.dispatch_type = DISPATCH_INIT; +} + +static void address_space_check_inc_req_pending(MemoryRegionSection *section) +{ + bool nested = false; + + /* currently, only mmio out of big lock, and need this to avoid dead lock */ + if (thread_context.dispatch_type == DISPATCH_MMIO) { + nested = ++thread_context.mmio_req_pending > 1 ? 
true : false; + /* To fix, will filter iommu case */ + if (nested && !memory_region_is_ram(section->mr)) { + fprintf(stderr, "mmio: nested target not RAM is not support"); + abort(); + } + } +} + +static void address_space_dec_req_pending(void) +{ + if (thread_context.dispatch_type == DISPATCH_MMIO) { + thread_context.mmio_req_pending--; + } +} + void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, int len, bool is_write) { @@ -3462,6 +3504,8 @@ void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, qemu_mutex_lock(&as->lock); safe_ref = memory_region_section_lookup_ref(d, page, &obj_mrs); qemu_mutex_unlock(&as->lock); + address_space_check_inc_req_pending(&obj_mrs); + if (!safe_ref) { qemu_mutex_lock_iothread(); qemu_mutex_lock(&as->lock); @@ -3477,6 +3521,7 @@ void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, if (is_write) { if (!memory_region_is_ram(section->mr)) { target_phys_addr_t addr1; + addr1 = memory_region_section_addr(section, addr); /* XXX: could force cpu_single_env to NULL to avoid potential bugs */ @@ -3510,6 +3555,7 @@ void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { target_phys_addr_t addr1; + /* I/O case */ addr1 = memory_region_section_addr(section, addr); if (l >= 4 && ((addr1 & 3) == 0)) { @@ -3537,6 +3583,7 @@ void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, qemu_put_ram_ptr(ptr); } } + address_space_dec_req_pending(); memory_region_section_unref(&obj_mrs); len -= l; buf += l; diff --git a/qemu-thread.h b/qemu-thread.h index 05fdaaf..fc9e17b 100644 --- a/qemu-thread.h +++ b/qemu-thread.h @@ -7,6 +7,11 @@ typedef struct QemuMutex QemuMutex; typedef struct QemuCond QemuCond; typedef struct QemuThread QemuThread; +typedef enum { + DISPATCH_INIT = 0, + DISPATCH_MMIO, + DISPATCH_IO, +} DispatchType; #ifdef _WIN32 #include 
"qemu-thread-win32.h" @@ -46,4 +51,6 @@ void qemu_thread_get_self(QemuThread *thread); bool qemu_thread_is_self(QemuThread *thread); void qemu_thread_exit(void *retval); +void qemu_thread_set_dispatch_type(DispatchType type); +void qemu_thread_reset_dispatch_type(void); #endif -- 1.7.4.4