@cflags_next_tb is specific to the TCG accelerator; move it to
its AccelCPUState.

Signed-off-by: Philippe Mathieu-Daudé <phi...@linaro.org>
Reviewed-by: Richard Henderson <richard.hender...@linaro.org>
Message-Id: <20240428221450.26460-19-phi...@linaro.org>
---
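Illustrative note, not part of the patch: the access pattern simply changes
from the CPUState field to the TCG AccelCPUState, e.g.

    /* before */
    cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);

    /* after */
    cpu->accel->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);

and the -1 reset value ("no forced cflags, use curr_cflags() for the next TB")
is now initialized in tcg_cpu_reset_hold() instead of cpu_common_reset_hold().
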
 accel/tcg/vcpu-state.h    |  2 ++
 include/hw/core/cpu.h     |  1 -
 accel/tcg/cpu-exec.c      | 12 ++++++------
 accel/tcg/tb-maint.c      |  4 ++--
 accel/tcg/tcg-accel-ops.c |  1 +
 accel/tcg/translate-all.c |  2 +-
 accel/tcg/watchpoint.c    |  5 +++--
 hw/core/cpu-common.c      |  1 -
 8 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/accel/tcg/vcpu-state.h b/accel/tcg/vcpu-state.h
index 3a0ea2d47a..5b09279140 100644
--- a/accel/tcg/vcpu-state.h
+++ b/accel/tcg/vcpu-state.h
@@ -13,6 +13,8 @@
  * @plugin_state: per-CPU plugin state
  */
 struct AccelCPUState {
+    uint32_t cflags_next_tb;
+
     sigjmp_buf jmp_env;
 
 #ifdef CONFIG_USER_ONLY
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index f1fe43dbea..97a0baf874 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -468,7 +468,6 @@ struct CPUState {
     bool crash_occurred;
     bool exit_request;
     int exclusive_context_count;
-    uint32_t cflags_next_tb;
     /* updates protected by BQL */
     uint32_t interrupt_request;
     int singlestep_enabled;
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 46ad16f911..55235d3e5e 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -720,7 +720,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
         if (replay_has_exception()
             && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
             /* Execute just one insn to trigger exception pending in the log */
-            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
+            cpu->accel->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                 | CF_NOIRQ | 1;
         }
 #endif
@@ -783,7 +783,7 @@ static inline bool icount_exit_request(CPUState *cpu)
     if (!icount_enabled()) {
         return false;
     }
-    if (cpu->cflags_next_tb != -1 && !(cpu->cflags_next_tb & CF_USE_ICOUNT)) {
+    if (!(cpu->accel->cflags_next_tb == -1 || cpu->accel->cflags_next_tb & CF_USE_ICOUNT)) {
         return false;
     }
     return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
@@ -797,7 +797,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
      * skip checking here. Any pending interrupts will get picked up
      * by the next TB we execute under normal cflags.
      */
-    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
+    if (cpu->accel->cflags_next_tb != -1 && cpu->accel->cflags_next_tb & CF_NOIRQ) {
         return false;
     }
 
@@ -947,7 +947,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     if (insns_left > 0 && insns_left < tb->icount)  {
         assert(insns_left <= CF_COUNT_MASK);
         assert(cpu->icount_extra == 0);
-        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
+        cpu->accel->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
     }
 #endif
 }
@@ -979,11 +979,11 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
              * have CF_INVALID set, -1 is a convenient invalid value that
              * does not require tcg headers for cpu_common_reset.
              */
-            cflags = cpu->cflags_next_tb;
+            cflags = cpu->accel->cflags_next_tb;
             if (cflags == -1) {
                 cflags = curr_cflags(cpu);
             } else {
-                cpu->cflags_next_tb = -1;
+                cpu->accel->cflags_next_tb = -1;
             }
 
             if (check_for_breakpoints(cpu, pc, &cflags)) {
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 19ae6793f3..2d5faca9fd 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -1084,7 +1084,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
     if (current_tb_modified) {
         /* Force execution of one insn next time.  */
         CPUState *cpu = current_cpu;
-        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+        cpu->accel->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
         return true;
     }
     return false;
@@ -1154,7 +1154,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
     if (current_tb_modified) {
         page_collection_unlock(pages);
         /* Force execution of one insn next time.  */
-        current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+        current_cpu->accel->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
         mmap_unlock();
         cpu_loop_exit_noexc(current_cpu);
     }
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index 56bbad9fcd..d9132a5835 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -89,6 +89,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu)
 
     qatomic_set(&cpu->neg.icount_decr.u32, 0);
     cpu->neg.can_do_io = true;
+    cpu->accel->cflags_next_tb = -1;
 }
 
 /* mask must never be zero, except for A20 change call */
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index b67adce20e..3a8199a761 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -631,7 +631,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
      * operations only (which execute after completion) so we don't
      * double instrument the instruction.
      */
-    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;
+    cpu->accel->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;
 
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         vaddr pc = cpu->cc->get_pc(cpu);
diff --git a/accel/tcg/watchpoint.c b/accel/tcg/watchpoint.c
index d3aab11458..0a40bfdc85 100644
--- a/accel/tcg/watchpoint.c
+++ b/accel/tcg/watchpoint.c
@@ -26,6 +26,7 @@
 #include "sysemu/replay.h"
 #include "hw/core/tcg-cpu-ops.h"
 #include "hw/core/cpu.h"
+#include "accel/tcg/vcpu-state.h"
 
 /*
  * Return true if this watchpoint address matches the specified
@@ -100,7 +101,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                  */
                 if (!cpu->neg.can_do_io) {
                     /* Force execution of one insn next time.  */
-                    cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
+                    cpu->accel->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
                     cpu_loop_exit_restore(cpu, ra);
                 }
                 /*
@@ -132,7 +133,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                 cpu_loop_exit(cpu);
             } else {
                 /* Force execution of one insn next time.  */
-                cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
+                cpu->accel->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
                 mmap_unlock();
                 cpu_loop_exit_noexc(cpu);
             }
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index 0e5ebbe050..073eb75ad0 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -124,7 +124,6 @@ static void cpu_common_reset_hold(Object *obj, ResetType type)
     cpu->icount_extra = 0;
     cpu->exception_index = -1;
     cpu->crash_occurred = false;
-    cpu->cflags_next_tb = -1;
 
     cpu_exec_reset_hold(cpu);
 }
-- 
2.41.0

