Different architectures will deal with MMIO exits differently. For example,
KVM_EXIT_IO is x86-specific, and I/O cycles are often synthesised by steering
into windows in PCI bridges on other architectures.
This patch calls arch-specific kvm_cpu__emulate_io() and kvm_cpu__emulate_mmio()
from the main runloop's IO and MMIO exit handlers. For x86, these directly
call kvm__emulate_io() and kvm__emulate_mmio() but other architectures will
perform some address munging before passing on the call.
Signed-off-by: Matt Evans
---
tools/kvm/kvm-cpu.c | 34 +++---
tools/kvm/x86/include/kvm/kvm-cpu-arch.h | 17 ++-
2 files changed, 33 insertions(+), 18 deletions(-)
diff --git a/tools/kvm/kvm-cpu.c b/tools/kvm/kvm-cpu.c
index 884a89f..4df9ead 100644
--- a/tools/kvm/kvm-cpu.c
+++ b/tools/kvm/kvm-cpu.c
@@ -52,11 +52,11 @@ static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
while (cpu->ring->first != cpu->ring->last) {
struct kvm_coalesced_mmio *m;
m = &cpu->ring->coalesced_mmio[cpu->ring->first];
- kvm__emulate_mmio(cpu->kvm,
- m->phys_addr,
- m->data,
- m->len,
- 1);
+ kvm_cpu__emulate_mmio(cpu->kvm,
+ m->phys_addr,
+ m->data,
+ m->len,
+ 1);
cpu->ring->first = (cpu->ring->first + 1) %
KVM_COALESCED_MMIO_MAX;
}
}
@@ -106,13 +106,13 @@ int kvm_cpu__start(struct kvm_cpu *cpu)
case KVM_EXIT_IO: {
bool ret;
- ret = kvm__emulate_io(cpu->kvm,
- cpu->kvm_run->io.port,
- (u8 *)cpu->kvm_run +
- cpu->kvm_run->io.data_offset,
- cpu->kvm_run->io.direction,
- cpu->kvm_run->io.size,
- cpu->kvm_run->io.count);
+ ret = kvm_cpu__emulate_io(cpu->kvm,
+ cpu->kvm_run->io.port,
+ (u8 *)cpu->kvm_run +
+ cpu->kvm_run->io.data_offset,
+ cpu->kvm_run->io.direction,
+ cpu->kvm_run->io.size,
+ cpu->kvm_run->io.count);
if (!ret)
goto panic_kvm;
@@ -121,11 +121,11 @@ int kvm_cpu__start(struct kvm_cpu *cpu)
case KVM_EXIT_MMIO: {
bool ret;
- ret = kvm__emulate_mmio(cpu->kvm,
- cpu->kvm_run->mmio.phys_addr,
- cpu->kvm_run->mmio.data,
- cpu->kvm_run->mmio.len,
- cpu->kvm_run->mmio.is_write);
+ ret = kvm_cpu__emulate_mmio(cpu->kvm,
+                             cpu->kvm_run->mmio.phys_addr,
+                             cpu->kvm_run->mmio.data,
+                             cpu->kvm_run->mmio.len,
+                             cpu->kvm_run->mmio.is_write);
if (!ret)
goto panic_kvm;
diff --git a/tools/kvm/x86/include/kvm/kvm-cpu-arch.h b/tools/kvm/x86/include/kvm/kvm-cpu-arch.h
index ed1c727..f138a92 100644
--- a/tools/kvm/x86/include/kvm/kvm-cpu-arch.h
+++ b/tools/kvm/x86/include/kvm/kvm-cpu-arch.h
@@ -4,7 +4,8 @@
/* Architecture-specific kvm_cpu definitions. */
#include <linux/kvm.h> /* for struct kvm_regs */
-
+#include "kvm/kvm.h" /* for kvm__emulate_{mm}io() */
+#include <stdbool.h>
#include <pthread.h>
struct kvm;
@@ -30,4 +31,18 @@ struct kvm_cpu {
struct kvm_coalesced_mmio_ring *ring;
};
+/*
+ * As these are such simple wrappers, let's have them in the header so they'll
+ * be cheaper to call:
+ */
+static inline bool kvm_cpu__emulate_io(struct kvm *kvm, u16 port, void *data,
+                                       int direction, int size, u32 count)
+{
+	return kvm__emulate_io(kvm, port, data, direction, size, count);
+}
+
+static inline bool kvm_cpu__emulate_mmio(struct kvm *kvm, u64 phys_addr,
+                                         u8 *data, u32 len, u8 is_write)
+{
+	/* NOTE(review): original posted patch called kvm_cpu__emulate_mmio()
+	 * here — infinite recursion. The intended callee, per the commit
+	 * message and the parallel io wrapper above, is kvm__emulate_mmio(). */
+	return kvm__emulate_mmio(kvm, phys_addr, data, len, is_write);
+}
+
#endif /* KVM__KVM_CPU_ARCH_H */
--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc"