Re: [PATCH v5 07/13] ARM: KVM: Emulation framework and CP15 emulation
On Mon, Dec 12, 2011 at 8:44 AM, Avi Kivity wrote: > On 12/11/2011 12:25 PM, Christoffer Dall wrote: >> From: Christoffer Dall >> >> Adds a new important function in the main KVM/ARM code called >> handle_exit() which is called from kvm_arch_vcpu_ioctl_run() on returns >> from guest execution. This function examines the Hyp-Syndrome-Register >> (HSR), which contains information telling KVM what caused the exit from >> the guest. >> >> Some of the reasons for an exit are CP15 accesses, which are >> not allowed from the guest and this commits handles these exits by > > commit > >> emulating the intented operation in software and skip the guest > > intended > >> instruction. >> >> @@ -306,6 +307,62 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) >> return 0; >> } >> >> +static inline int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, >> + int exception_index) >> +{ >> + unsigned long hsr_ec; >> + >> + if (exception_index == ARM_EXCEPTION_IRQ) >> + return 0; >> + >> + if (exception_index != ARM_EXCEPTION_HVC) { >> + kvm_err(-EINVAL, "Unsupported exception type"); >> + return -EINVAL; >> + } >> + >> + hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT; >> + switch (hsr_ec) { >> + case HSR_EC_WFI: >> + return kvm_handle_wfi(vcpu, run); >> + case HSR_EC_CP15_32: >> + case HSR_EC_CP15_64: >> + return kvm_handle_cp15_access(vcpu, run); >> + case HSR_EC_CP14_MR: >> + return kvm_handle_cp14_access(vcpu, run); >> + case HSR_EC_CP14_LS: >> + return kvm_handle_cp14_load_store(vcpu, run); >> + case HSR_EC_CP14_64: >> + return kvm_handle_cp14_access(vcpu, run); >> + case HSR_EC_CP_0_13: >> + return kvm_handle_cp_0_13_access(vcpu, run); >> + case HSR_EC_CP10_ID: >> + return kvm_handle_cp10_id(vcpu, run); >> + case HSR_EC_SVC_HYP: >> + /* SVC called from Hyp mode should never get here */ >> + kvm_msg("SVC called from Hyp mode shouldn't go here"); >> + BUG(); >> + case HSR_EC_HVC: >> + kvm_msg("hvc: %x (at %08x)", vcpu->arch.hsr & ((1 << 16) - 1), >> + vcpu->arch.regs.pc); 
>> + kvm_msg(" HSR: %8x", vcpu->arch.hsr); >> + break; >> + case HSR_EC_IABT: >> + case HSR_EC_DABT: >> + return kvm_handle_guest_abort(vcpu, run); >> + case HSR_EC_IABT_HYP: >> + case HSR_EC_DABT_HYP: >> + /* The hypervisor should never cause aborts */ >> + kvm_msg("The hypervisor itself shouldn't cause aborts"); >> + BUG(); >> + default: >> + kvm_msg("Unkown exception class: %08x (%08x)", hsr_ec, >> + vcpu->arch.hsr); >> + BUG(); >> + } > > x86 uses a function table, which is slightly nicer. > ok, completely stole the design from x86. >> + >> + return 0; >> +} >> + >> /** >> * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest >> code >> * @vcpu: The VCPU pointer >> @@ -333,6 +390,26 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, >> struct kvm_run *run) >> local_irq_enable(); >> >> trace_kvm_exit(vcpu->arch.regs.pc); >> + >> + ret = handle_exit(vcpu, run, ret); >> + if (ret) { >> + kvm_err(ret, "Error in handle_exit"); >> + break; >> + } >> + >> + if (run->exit_reason == KVM_EXIT_MMIO) >> + break; >> + >> + if (need_resched()) { >> + vcpu_put(vcpu); >> + schedule(); >> + vcpu_load(vcpu); >> + } > > I don't think you need the vcpu_put()/vcpu_load() here; you can replace > the whole thing with cond_resched(). > sorry again about the stupid patch layout. >> + >> + if (signal_pending(current) && !(run->exit_reason)) { >> + run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; > > This exit reason doesn't fit with KVM_IRQ_LINE. > > With KVM_INTERRUPT, userspace is responsible for determining when to > inject interrupts, so it needs to know when guest interrupts become > enabled (this is KVM_EXIT_IRQ_WINDOW_OPEN). With KVM_IRQ_LINE, > userspace just sets the line status, and the kernel takes care of > everything. > > Oh, and you need to exit to userspace unconditionally if a signal is > pending. > also fixed. sorry again, again. 
>> + break; >> + } >> } >> >> + >> +/** >> + * Co-processor emulation >> + */ >> + >> +struct coproc_params { >> + unsigned long CRm; >> + unsigned long CRn; >> + unsigned long Op1; >> + unsigned long Op2; >> + unsigned long Rt1; >> + unsigned long Rt
Re: [PATCH v5 07/13] ARM: KVM: Emulation framework and CP15 emulation
On 12/11/2011 12:25 PM, Christoffer Dall wrote: > From: Christoffer Dall > > Adds a new important function in the main KVM/ARM code called > handle_exit() which is called from kvm_arch_vcpu_ioctl_run() on returns > from guest execution. This function examines the Hyp-Syndrome-Register > (HSR), which contains information telling KVM what caused the exit from > the guest. > > Some of the reasons for an exit are CP15 accesses, which are > not allowed from the guest and this commits handles these exits by commit > emulating the intented operation in software and skip the guest intended > instruction. > > @@ -306,6 +307,62 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) > return 0; > } > > +static inline int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, > + int exception_index) > +{ > + unsigned long hsr_ec; > + > + if (exception_index == ARM_EXCEPTION_IRQ) > + return 0; > + > + if (exception_index != ARM_EXCEPTION_HVC) { > + kvm_err(-EINVAL, "Unsupported exception type"); > + return -EINVAL; > + } > + > + hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT; > + switch (hsr_ec) { > + case HSR_EC_WFI: > + return kvm_handle_wfi(vcpu, run); > + case HSR_EC_CP15_32: > + case HSR_EC_CP15_64: > + return kvm_handle_cp15_access(vcpu, run); > + case HSR_EC_CP14_MR: > + return kvm_handle_cp14_access(vcpu, run); > + case HSR_EC_CP14_LS: > + return kvm_handle_cp14_load_store(vcpu, run); > + case HSR_EC_CP14_64: > + return kvm_handle_cp14_access(vcpu, run); > + case HSR_EC_CP_0_13: > + return kvm_handle_cp_0_13_access(vcpu, run); > + case HSR_EC_CP10_ID: > + return kvm_handle_cp10_id(vcpu, run); > + case HSR_EC_SVC_HYP: > + /* SVC called from Hyp mode should never get here */ > + kvm_msg("SVC called from Hyp mode shouldn't go here"); > + BUG(); > + case HSR_EC_HVC: > + kvm_msg("hvc: %x (at %08x)", vcpu->arch.hsr & ((1 << 16) - 1), > + vcpu->arch.regs.pc); > + kvm_msg(" HSR: %8x", vcpu->arch.hsr); > + break; > + case HSR_EC_IABT: > + case HSR_EC_DABT: > + return 
kvm_handle_guest_abort(vcpu, run); > + case HSR_EC_IABT_HYP: > + case HSR_EC_DABT_HYP: > + /* The hypervisor should never cause aborts */ > + kvm_msg("The hypervisor itself shouldn't cause aborts"); > + BUG(); > + default: > + kvm_msg("Unkown exception class: %08x (%08x)", hsr_ec, > + vcpu->arch.hsr); > + BUG(); > + } x86 uses a function table, which is slightly nicer. > + > + return 0; > +} > + > /** > * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code > * @vcpu:The VCPU pointer > @@ -333,6 +390,26 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, > struct kvm_run *run) > local_irq_enable(); > > trace_kvm_exit(vcpu->arch.regs.pc); > + > + ret = handle_exit(vcpu, run, ret); > + if (ret) { > + kvm_err(ret, "Error in handle_exit"); > + break; > + } > + > + if (run->exit_reason == KVM_EXIT_MMIO) > + break; > + > + if (need_resched()) { > + vcpu_put(vcpu); > + schedule(); > + vcpu_load(vcpu); > + } I don't think you need the vcpu_put()/vcpu_load() here; you can replace the whole thing with cond_resched(). > + > + if (signal_pending(current) && !(run->exit_reason)) { > + run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; This exit reason doesn't fit with KVM_IRQ_LINE. With KVM_INTERRUPT, userspace is responsible for determining when to inject interrupts, so it needs to know when guest interrupts become enabled (this is KVM_EXIT_IRQ_WINDOW_OPEN). With KVM_IRQ_LINE, userspace just sets the line status, and the kernel takes care of everything. Oh, and you need to exit to userspace unconditionally if a signal is pending. 
> + break; > + } > } > > + > +/** > + * Co-processor emulation > + */ > + > +struct coproc_params { > + unsigned long CRm; > + unsigned long CRn; > + unsigned long Op1; > + unsigned long Op2; > + unsigned long Rt1; > + unsigned long Rt2; > + bool is_64bit; > + bool is_write; > +}; > + > +#define CP15_OP(_vcpu, _params, _cp15_reg) \ > +do { \ > + if (_params->is_write) \ > + _vcpu->arch.cp15._cp15_reg = *vcpu_reg(_vcpu, _params->Rt1); \ > + else \ > + *vcpu_reg(_vcpu, _params->Rt1) = _vcpu->arch.cp15._cp15_reg; \ > +
[PATCH v5 07/13] ARM: KVM: Emulation framework and CP15 emulation
From: Christoffer Dall Adds a new important function in the main KVM/ARM code called handle_exit() which is called from kvm_arch_vcpu_ioctl_run() on returns from guest execution. This function examines the Hyp-Syndrome-Register (HSR), which contains information telling KVM what caused the exit from the guest. Some of the reasons for an exit are CP15 accesses, which are not allowed from the guest and this commits handles these exits by emulating the intented operation in software and skip the guest instruction. Signed-off-by: Christoffer Dall --- arch/arm/include/asm/kvm_emulate.h |7 + arch/arm/kvm/arm.c | 77 ++ arch/arm/kvm/emulate.c | 195 arch/arm/kvm/trace.h | 28 + 4 files changed, 307 insertions(+), 0 deletions(-) diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 91d461a..af21fd5 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -40,6 +40,13 @@ static inline unsigned char vcpu_mode(struct kvm_vcpu *vcpu) return modes_table[vcpu->arch.regs.cpsr & 0xf]; } +int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp15_access(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); + /* * Return the SPSR for the specified mode of the virtual CPU. 
*/ diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index f5d..a6e1763 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "debug.h" @@ -306,6 +307,62 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) return 0; } +static inline int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + int exception_index) +{ + unsigned long hsr_ec; + + if (exception_index == ARM_EXCEPTION_IRQ) + return 0; + + if (exception_index != ARM_EXCEPTION_HVC) { + kvm_err(-EINVAL, "Unsupported exception type"); + return -EINVAL; + } + + hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT; + switch (hsr_ec) { + case HSR_EC_WFI: + return kvm_handle_wfi(vcpu, run); + case HSR_EC_CP15_32: + case HSR_EC_CP15_64: + return kvm_handle_cp15_access(vcpu, run); + case HSR_EC_CP14_MR: + return kvm_handle_cp14_access(vcpu, run); + case HSR_EC_CP14_LS: + return kvm_handle_cp14_load_store(vcpu, run); + case HSR_EC_CP14_64: + return kvm_handle_cp14_access(vcpu, run); + case HSR_EC_CP_0_13: + return kvm_handle_cp_0_13_access(vcpu, run); + case HSR_EC_CP10_ID: + return kvm_handle_cp10_id(vcpu, run); + case HSR_EC_SVC_HYP: + /* SVC called from Hyp mode should never get here */ + kvm_msg("SVC called from Hyp mode shouldn't go here"); + BUG(); + case HSR_EC_HVC: + kvm_msg("hvc: %x (at %08x)", vcpu->arch.hsr & ((1 << 16) - 1), +vcpu->arch.regs.pc); + kvm_msg(" HSR: %8x", vcpu->arch.hsr); + break; + case HSR_EC_IABT: + case HSR_EC_DABT: + return kvm_handle_guest_abort(vcpu, run); + case HSR_EC_IABT_HYP: + case HSR_EC_DABT_HYP: + /* The hypervisor should never cause aborts */ + kvm_msg("The hypervisor itself shouldn't cause aborts"); + BUG(); + default: + kvm_msg("Unkown exception class: %08x (%08x)", hsr_ec, + vcpu->arch.hsr); + BUG(); + } + + return 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -333,6 +390,26 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, 
struct kvm_run *run) local_irq_enable(); trace_kvm_exit(vcpu->arch.regs.pc); + + ret = handle_exit(vcpu, run, ret); + if (ret) { + kvm_err(ret, "Error in handle_exit"); + break; + } + + if (run->exit_reason == KVM_EXIT_MMIO) + break; + + if (need_resched()) { + vcpu_put(vcpu); + schedule(); + vcpu_load(vcpu); + } + + if (signal_pending(current) && !(run->exit_reason)) { + run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; +