The goal is to split the functions such that cpu-exec.c contains only
CPU-specific code while cpus.c contains only generic code. The
function interface to cpu-exec needs to be virtualised to prepare for
multi-arch support, and moving these definitions out avoids bloating
the QOM interface. So move these definitions out of cpu-exec and into
the architecture-independent cpus.c.

Signed-off-by: Peter Crosthwaite <crosthwaite.pe...@gmail.com>
---
 cpu-exec.c | 49 -------------------------------------------------
 cpus.c     | 49 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 49 insertions(+), 49 deletions(-)
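
[Note for reviewers, not part of the commit: "virtualising" the
cpu-exec interface for multi-arch could eventually mean dispatching
through a per-arch hook on the CPU class rather than calling a fixed
cpu_exec() symbol. The self-contained C sketch below only illustrates
that dispatch pattern; the names (CPUClass.exec, cpu_exec_dispatch,
arm_cpu_exec) are hypothetical and not existing QEMU API.

    /* Hypothetical sketch, not QEMU code: generic code reaches the
     * architecture's execution loop through a function pointer. */
    #include <stdio.h>

    typedef struct CPUState CPUState;

    typedef struct CPUClass {
        int (*exec)(CPUState *cpu);   /* arch-specific execution loop */
    } CPUClass;

    struct CPUState {
        const CPUClass *cc;           /* stand-in for a QOM class lookup */
    };

    static int arm_cpu_exec(CPUState *cpu)
    {
        (void)cpu;                    /* a real loop would run TBs here */
        printf("arch-specific loop runs here\n");
        return 0;
    }

    /* Generic caller: names no architecture-specific symbol. */
    static int cpu_exec_dispatch(CPUState *cpu)
    {
        return cpu->cc->exec(cpu);
    }

    int main(void)
    {
        static const CPUClass arm_class = { .exec = arm_cpu_exec };
        CPUState cpu = { .cc = &arm_class };
        return cpu_exec_dispatch(&cpu);
    }

With a hook of that shape, generic code never has to name an
architecture-specific function directly.]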

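[A second reviewer note, also not part of the commit: exit_request is
declared volatile sig_atomic_t because it may be set from a signal
handler while the execution loop polls it, and volatile sig_atomic_t
is the classic C type guaranteed safe to write from a handler. A
minimal sketch of that pattern, with made-up names, assuming a POSIX
host:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t exit_request;

    static void request_exit(int sig)
    {
        (void)sig;
        exit_request = 1;         /* the only async-signal-safe action */
    }

    int main(void)
    {
        signal(SIGINT, request_exit);
        while (!exit_request) {
            usleep(1000);         /* stand-in for executing one TB */
        }
        printf("exit requested\n");
        return 0;
    }
]
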
diff --git a/cpu-exec.c b/cpu-exec.c
index 0266609..dbea47c 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -25,7 +25,6 @@
 #include "sysemu/qtest.h"
 #include "qemu/timer.h"
 #include "exec/address-spaces.h"
-#include "exec/memory-internal.h"
 #include "qemu/rcu.h"
 
 /* -icount align implementation. */
@@ -127,52 +126,6 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
 }
 #endif /* CONFIG USER ONLY */
 
-void cpu_loop_exit(CPUState *cpu)
-{
-    cpu->current_tb = NULL;
-    siglongjmp(cpu->jmp_env, 1);
-}
-
-/* exit the current TB from a signal handler. The host registers are
-   restored in a state compatible with the CPU emulator
- */
-#if defined(CONFIG_SOFTMMU)
-void cpu_resume_from_signal(CPUState *cpu, void *puc)
-{
-    /* XXX: restore cpu registers saved in host registers */
-
-    cpu->exception_index = -1;
-    siglongjmp(cpu->jmp_env, 1);
-}
-
-void cpu_reload_memory_map(CPUState *cpu)
-{
-    AddressSpaceDispatch *d;
-
-    if (qemu_in_vcpu_thread()) {
-        /* Do not let the guest prolong the critical section as much as it
-         * as it desires.
-         *
-         * Currently, this is prevented by the I/O thread's periodinc kicking
-         * of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread)
-         * but this will go away once TCG's execution moves out of the global
-         * mutex.
-         *
-         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
-         * only protects cpu->as->dispatch.  Since we reload it below, we can
-         * split the critical section.
-         */
-        rcu_read_unlock();
-        rcu_read_lock();
-    }
-
-    /* The CPU and TLB are protected by the iothread lock.  */
-    d = atomic_rcu_read(&cpu->as->dispatch);
-    cpu->memory_dispatch = d;
-    tlb_flush(cpu, 1);
-}
-#endif
-
 /* Execute a TB, and fix up the CPU state afterwards if necessary */
 static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
 {
@@ -344,8 +297,6 @@ static void cpu_handle_debug_exception(CPUState *cpu)
 
 /* main execution loop */
 
-volatile sig_atomic_t exit_request;
-
 int cpu_exec(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
diff --git a/cpus.c b/cpus.c
index c8a2911..2dc4a9a 100644
--- a/cpus.c
+++ b/cpus.c
@@ -28,6 +28,7 @@
 #include "monitor/monitor.h"
 #include "qapi/qmp/qerror.h"
 #include "sysemu/sysemu.h"
+#include "exec/memory-internal.h"
 #include "exec/gdbstub.h"
 #include "sysemu/dma.h"
 #include "sysemu/kvm.h"
@@ -64,6 +65,8 @@
 
 #endif /* CONFIG_LINUX */
 
+volatile sig_atomic_t exit_request;
+
 static CPUState *next_cpu;
 int64_t max_delay;
 int64_t max_advance;
@@ -1394,6 +1397,52 @@ static void tcg_exec_all(void)
     exit_request = 0;
 }
 
+/* exit the current TB from a signal handler. The host registers are
+   restored in a state compatible with the CPU emulator
+ */
+#if defined(CONFIG_SOFTMMU)
+void cpu_resume_from_signal(CPUState *cpu, void *puc)
+{
+    /* XXX: restore cpu registers saved in host registers */
+
+    cpu->exception_index = -1;
+    siglongjmp(cpu->jmp_env, 1);
+}
+
+void cpu_reload_memory_map(CPUState *cpu)
+{
+    AddressSpaceDispatch *d;
+
+    if (qemu_in_vcpu_thread()) {
+        /* Do not let the guest prolong the critical section as much as it
+         * desires.
+         *
+         * Currently, this is prevented by the I/O thread's periodic kicking
+         * of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread)
+         * but this will go away once TCG's execution moves out of the global
+         * mutex.
+         *
+         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
+         * only protects cpu->as->dispatch.  Since we reload it below, we can
+         * split the critical section.
+         */
+        rcu_read_unlock();
+        rcu_read_lock();
+    }
+
+    /* The CPU and TLB are protected by the iothread lock.  */
+    d = atomic_rcu_read(&cpu->as->dispatch);
+    cpu->memory_dispatch = d;
+    tlb_flush(cpu, 1);
+}
+#endif
+
+void cpu_loop_exit(CPUState *cpu)
+{
+    cpu->current_tb = NULL;
+    siglongjmp(cpu->jmp_env, 1);
+}
+
 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
 {
     /* XXX: implement xxx_cpu_list for targets that still miss it */
-- 
1.9.1

