Synchronously check for and update the current CPU's address
spaces on any slow-path access.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1866
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 include/exec/memory.h |  6 ++++++
 accel/tcg/cputlb.c    |  2 ++
 softmmu/physmem.c     | 20 ++++++++++++++++++++
 3 files changed, 28 insertions(+)
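For reference, here is a minimal, self-contained sketch (not part of the
patch) of the generation-counter handshake this relies on: the memory
listener is assumed to publish a new layout generation with a release
store, and cpu_address_space_sync() compares it against the last
committed generation with an acquire load, flushing the TLB only on a
mismatch. The struct and helper names below are illustrative stand-ins,
not QEMU APIs.

/* sketch: generation-counter check mirroring cpu_address_space_sync() */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cpu_as_stub {
    _Atomic uint32_t layout_gen;   /* bumped by the memory listener */
    uint32_t commit_gen;           /* last generation this CPU committed */
};

/* Producer side (assumed to live in tcg_commit): release-publish a change. */
static void publish_layout_change(struct cpu_as_stub *c)
{
    uint32_t next = atomic_load_explicit(&c->layout_gen,
                                         memory_order_relaxed) + 1;
    atomic_store_explicit(&c->layout_gen, next, memory_order_release);
}

/* Consumer side: returns true when the caller needs to flush the TLB. */
static bool sync_one_as(struct cpu_as_stub *c)
{
    uint32_t gen = atomic_load_explicit(&c->layout_gen,
                                        memory_order_acquire);
    if (c->commit_gen != gen) {
        c->commit_gen = gen;       /* re-fetch dispatch tables here */
        return true;
    }
    return false;
}

int main(void)
{
    struct cpu_as_stub c = { 0 };
    publish_layout_change(&c);
    printf("flush needed: %d\n", sync_one_as(&c));   /* 1: layout changed */
    printf("flush needed: %d\n", sync_one_as(&c));   /* 0: already current */
    return 0;
}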

diff --git a/include/exec/memory.h b/include/exec/memory.h
index 68284428f8..7ec842076f 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -2780,6 +2780,12 @@ void address_space_cache_destroy(MemoryRegionCache *cache);
 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                             bool is_write, MemTxAttrs attrs);
 
+/*
+ * Ensure all of the CPU's address spaces are up-to-date.
+ * If any have changed, the CPU's TLB is flushed.
+ */
+void cpu_address_space_sync(CPUState *cpu);
+
 /* address_space_translate: translate an address range into an address space
  * into a MemoryRegion and an address range into that section.  Should be
  * called from an RCU critical section, to avoid that the last reference
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 3270f65c20..91be3f3064 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1827,6 +1827,8 @@ static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
     l->page[1].size = 0;
     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
 
+    cpu_address_space_sync(env_cpu(env));
+
     if (likely(!crosspage)) {
         mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
 
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index e1c535380a..5a89caa257 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -2536,6 +2536,26 @@ static void tcg_commit(MemoryListener *listener)
     }
 }
 
+void cpu_address_space_sync(CPUState *cpu)
+{
+    int i, n = cpu->num_ases;
+    bool need_flush = false;
+
+    for (i = 0; i < n; ++i) {
+        CPUAddressSpace *cpuas = &cpu->cpu_ases[i];
+        uint32_t gen = qatomic_load_acquire(&cpuas->layout_gen);
+
+        if (cpuas->commit_gen != gen) {
+            cpuas->commit_gen = gen;
+            cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
+            need_flush = true;
+        }
+    }
+    if (need_flush) {
+        tlb_flush(cpu);
+    }
+}
+
 static void memory_map_init(void)
 {
     system_memory = g_malloc(sizeof(*system_memory));
-- 
2.34.1

