We get an illegal instruction trap whenever a Guest/VM executes the WFI instruction.
This patch handles the WFI trap by blocking the trapped VCPU using the
kvm_vcpu_block() API. The blocked VCPU is resumed automatically whenever
a VCPU interrupt is injected from user space or from the in-kernel
IRQCHIP emulation.

Signed-off-by: Anup Patel <anup.pa...@wdc.com>
---
 arch/riscv/kvm/vcpu_exit.c | 86 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)

diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index efc06198c259..f4ddf357ded8 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -12,6 +12,9 @@
 #include <linux/kvm_host.h>
 #include <asm/csr.h>
 
+#define INSN_MASK_WFI          0xffffff00
+#define INSN_MATCH_WFI         0x10500000
+
 #define INSN_MATCH_LB          0x3
 #define INSN_MASK_LB           0x707f
 #define INSN_MATCH_LH          0x1003
@@ -179,6 +182,85 @@ static ulong get_insn(struct kvm_vcpu *vcpu)
        return val;
 }
 
+typedef int (*illegal_insn_func)(struct kvm_vcpu *vcpu,
+                                struct kvm_run *run,
+                                ulong insn);
+
+static int truly_illegal_insn(struct kvm_vcpu *vcpu,
+                             struct kvm_run *run,
+                             ulong insn)
+{
+       /* TODO: Redirect trap to Guest VCPU */
+       return -ENOTSUPP;
+}
+
+static int system_opcode_insn(struct kvm_vcpu *vcpu,
+                             struct kvm_run *run,
+                             ulong insn)
+{
+       if ((insn & INSN_MASK_WFI) == INSN_MATCH_WFI) {
+               vcpu->stat.wfi_exit_stat++;
+               if (!kvm_riscv_vcpu_has_interrupt(vcpu)) {
+                       kvm_vcpu_block(vcpu);
+                       kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+               }
+               vcpu->arch.guest_context.sepc += INSN_LEN(insn);
+               return 1;
+       }
+
+       return truly_illegal_insn(vcpu, run, insn);
+}
+
+static illegal_insn_func illegal_insn_table[32] = {
+       truly_illegal_insn, /* 0 */
+       truly_illegal_insn, /* 1 */
+       truly_illegal_insn, /* 2 */
+       truly_illegal_insn, /* 3 */
+       truly_illegal_insn, /* 4 */
+       truly_illegal_insn, /* 5 */
+       truly_illegal_insn, /* 6 */
+       truly_illegal_insn, /* 7 */
+       truly_illegal_insn, /* 8 */
+       truly_illegal_insn, /* 9 */
+       truly_illegal_insn, /* 10 */
+       truly_illegal_insn, /* 11 */
+       truly_illegal_insn, /* 12 */
+       truly_illegal_insn, /* 13 */
+       truly_illegal_insn, /* 14 */
+       truly_illegal_insn, /* 15 */
+       truly_illegal_insn, /* 16 */
+       truly_illegal_insn, /* 17 */
+       truly_illegal_insn, /* 18 */
+       truly_illegal_insn, /* 19 */
+       truly_illegal_insn, /* 20 */
+       truly_illegal_insn, /* 21 */
+       truly_illegal_insn, /* 22 */
+       truly_illegal_insn, /* 23 */
+       truly_illegal_insn, /* 24 */
+       truly_illegal_insn, /* 25 */
+       truly_illegal_insn, /* 26 */
+       truly_illegal_insn, /* 27 */
+       system_opcode_insn, /* 28 */
+       truly_illegal_insn, /* 29 */
+       truly_illegal_insn, /* 30 */
+       truly_illegal_insn  /* 31 */
+};
+
+static int illegal_inst_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                             unsigned long stval)
+{
+       ulong insn = stval;
+
+       if (unlikely((insn & 3) != 3)) {
+               if (insn == 0)
+                       insn = get_insn(vcpu);
+               if ((insn & 3) != 3)
+                       return truly_illegal_insn(vcpu, run, insn);
+       }
+
+       return illegal_insn_table[(insn & 0x7c) >> 2](vcpu, run, insn);
+}
+
 static int emulate_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        unsigned long fault_addr)
 {
@@ -439,6 +521,10 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
        ret = -EFAULT;
        run->exit_reason = KVM_EXIT_UNKNOWN;
        switch (scause) {
+       case EXC_INST_ILLEGAL:
+               if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
+                       ret = illegal_inst_fault(vcpu, run, stval);
+               break;
        case EXC_INST_PAGE_FAULT:
        case EXC_LOAD_PAGE_FAULT:
        case EXC_STORE_PAGE_FAULT:
-- 
2.17.1
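
For reference, the decode arithmetic above can be checked with a small
stand-alone C program. This is only an illustrative sketch, not part of
the patch: it hard-codes the 32-bit WFI encoding (0x10500073, from the
RISC-V privileged spec) and reuses the two #defines introduced above to
show why WFI lands in slot 28 of illegal_insn_table[] and then matches
in system_opcode_insn():

#include <stdio.h>
#include <stdint.h>

#define INSN_MASK_WFI   0xffffff00
#define INSN_MATCH_WFI  0x10500000

int main(void)
{
        /* WFI encoding: funct12=0x105, rs1=0, funct3=0, rd=0,
         * opcode=SYSTEM (0x73). */
        uint32_t insn = 0x10500073;

        /* Low two bits both set => full 32-bit instruction,
         * not a compressed encoding. */
        printf("32-bit insn: %s\n", (insn & 3) == 3 ? "yes" : "no");

        /* Bits [6:2] of the major opcode index the 32-entry table;
         * SYSTEM (0x73) gives (0x73 & 0x7c) >> 2 == 28. */
        printf("table slot: %u\n", (unsigned)((insn & 0x7c) >> 2));

        /* The mask clears the low byte (the 7-bit opcode and the
         * lowest rd bit); the remaining bits identify WFI. */
        printf("WFI match: %s\n",
               (insn & INSN_MASK_WFI) == INSN_MATCH_WFI ? "yes" : "no");
        return 0;
}

It prints "32-bit insn: yes", "table slot: 28", and "WFI match: yes",
matching the path the trapped instruction takes through
illegal_inst_fault().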