Convert core code usages of tcg_enabled() that have no specific CPU associated with them to either tcg_any_enabled() or tcg_all_enabled(). This prepares for support of multiple TCG engines, where accelerator queries must either target a specific CPU or use global any/all logic.
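For illustration, the intended semantics could look like the sketch below. This is a hypothetical sketch only, not part of this patch: cpu_uses_tcg() is an invented placeholder for whatever per-CPU accelerator predicate the multi-engine work introduces, while CPU_FOREACH() is the existing CPU-list iterator from include/qom/cpu.h.

/* Hypothetical sketch -- cpu_uses_tcg() is a placeholder for a per-CPU
 * accelerator query; it is not an existing QEMU API.
 */
static inline bool tcg_any_enabled(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu_uses_tcg(cpu)) {
            return true;    /* at least one vCPU executes via TCG */
        }
    }
    return false;
}

static inline bool tcg_all_enabled(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_uses_tcg(cpu)) {
            return false;   /* some vCPU runs under KVM, Xen, ... */
        }
    }
    return true;
}

The dirty-memory sites below use the "any" form, since code-dirty tracking is needed as soon as a single vCPU translates code; cpu_flush_icache_range() uses the "all" form, since the flush may only be skipped when every vCPU runs under TCG.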
Signed-off-by: Peter Crosthwaite <crosthwaite.pe...@gmail.com>
---
 cpus.c                  | 2 +-
 exec.c                  | 4 ++--
 include/exec/ram_addr.h | 5 +++--
 memory.c                | 8 ++++----
 4 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/cpus.c b/cpus.c
index f6b448b..cd25c8c 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1159,7 +1159,7 @@ void qemu_mutex_lock_iothread(void)
     /* In the simple case there is no need to bump the VCPU thread out of
      * TCG code execution.
      */
-    if (!tcg_enabled() || qemu_in_vcpu_thread() ||
+    if (!tcg_any_enabled() || qemu_in_vcpu_thread() ||
         !first_cpu || !first_cpu->thread) {
         qemu_mutex_lock(&qemu_global_mutex);
         atomic_dec(&iothread_requesting_mutex);
diff --git a/exec.c b/exec.c
index 7d60e15..e5101e0 100644
--- a/exec.c
+++ b/exec.c
@@ -926,7 +926,7 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
     dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                          page, end - page);
 
-    if (dirty && tcg_enabled()) {
+    if (dirty && tcg_any_enabled()) {
         tlb_reset_dirty_range_all(start, length);
     }
 
@@ -2589,7 +2589,7 @@ void cpu_flush_icache_range(hwaddr start, int len)
      * so there is no need to flush anything. For KVM / Xen we need to flush
      * the host's instruction cache at least.
      */
-    if (tcg_enabled()) {
+    if (tcg_all_enabled()) {
         return;
     }
 
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index c113f21..2e8fdd1 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -164,14 +164,15 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
 
                 atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
                 atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
-                if (tcg_enabled()) {
+                if (tcg_any_enabled()) {
                     atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
                 }
             }
         }
         xen_modified_memory(start, pages << TARGET_PAGE_BITS);
     } else {
-        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
+        uint8_t clients = tcg_any_enabled() ? DIRTY_CLIENTS_ALL
+                                            : DIRTY_CLIENTS_NOCODE;
         /*
          * bitmap-traveling is faster than memory-traveling (for addr...)
          * especially when most of the memory is not dirty.
diff --git a/memory.c b/memory.c
index 0acebb1..0356ccd9 100644
--- a/memory.c
+++ b/memory.c
@@ -1203,7 +1203,7 @@ void memory_region_init_ram(MemoryRegion *mr,
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
     mr->ram_addr = qemu_ram_alloc(size, mr, errp);
-    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+    mr->dirty_log_mask = tcg_any_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
 }
 
 void memory_region_init_resizeable_ram(MemoryRegion *mr,
@@ -1221,7 +1221,7 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr,
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
     mr->ram_addr = qemu_ram_alloc_resizeable(size, max_size, resized, mr, errp);
-    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+    mr->dirty_log_mask = tcg_any_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
 }
 
 #ifdef __linux__
@@ -1238,7 +1238,7 @@ void memory_region_init_ram_from_file(MemoryRegion *mr,
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
     mr->ram_addr = qemu_ram_alloc_from_file(size, mr, share, path, errp);
-    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+    mr->dirty_log_mask = tcg_any_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
 }
 #endif
 
@@ -1252,7 +1252,7 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,
     mr->ram = true;
     mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
-    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+    mr->dirty_log_mask = tcg_any_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
 
     /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
     assert(ptr != NULL);
-- 
1.9.1