On 29/07/19 13:57, Anup Patel wrote:
> +static ulong get_insn(struct kvm_vcpu *vcpu)
> +{
> +     ulong __sepc = vcpu->arch.guest_context.sepc;
> +     ulong __hstatus, __sstatus, __vsstatus;
> +#ifdef CONFIG_RISCV_ISA_C
> +     ulong rvc_mask = 3, tmp;
> +#endif
> +     ulong flags, val;
> +
> +     local_irq_save(flags);
> +
> +     __vsstatus = csr_read(CSR_VSSTATUS);
> +     __sstatus = csr_read(CSR_SSTATUS);
> +     __hstatus = csr_read(CSR_HSTATUS);
> +
> +     csr_write(CSR_VSSTATUS, __vsstatus | SR_MXR);
> +     csr_write(CSR_SSTATUS, vcpu->arch.guest_context.sstatus | SR_MXR);
> +     csr_write(CSR_HSTATUS, vcpu->arch.guest_context.hstatus | HSTATUS_SPRV);
> +
> +#ifndef CONFIG_RISCV_ISA_C
> +     asm ("\n"
> +#ifdef CONFIG_64BIT
> +             STR(LWU) " %[insn], (%[addr])\n"
> +#else
> +             STR(LW) " %[insn], (%[addr])\n"
> +#endif
> +             : [insn] "=&r" (val) : [addr] "r" (__sepc));
> +#else
> +     asm ("and %[tmp], %[addr], 2\n"
> +             "bnez %[tmp], 1f\n"
> +#ifdef CONFIG_64BIT
> +             STR(LWU) " %[insn], (%[addr])\n"
> +#else
> +             STR(LW) " %[insn], (%[addr])\n"
> +#endif
> +             "and %[tmp], %[insn], %[rvc_mask]\n"
> +             "beq %[tmp], %[rvc_mask], 2f\n"
> +             "sll %[insn], %[insn], %[xlen_minus_16]\n"
> +             "srl %[insn], %[insn], %[xlen_minus_16]\n"
> +             "j 2f\n"
> +             "1:\n"
> +             "lhu %[insn], (%[addr])\n"
> +             "and %[tmp], %[insn], %[rvc_mask]\n"
> +             "bne %[tmp], %[rvc_mask], 2f\n"
> +             "lhu %[tmp], 2(%[addr])\n"
> +             "sll %[tmp], %[tmp], 16\n"
> +             "add %[insn], %[insn], %[tmp]\n"
> +             "2:"
> +     : [vsstatus] "+&r" (__vsstatus), [insn] "=&r" (val),
> +       [tmp] "=&r" (tmp)
> +     : [addr] "r" (__sepc), [rvc_mask] "r" (rvc_mask),
> +       [xlen_minus_16] "i" (__riscv_xlen - 16));
> +#endif
> +
> +     csr_write(CSR_HSTATUS, __hstatus);
> +     csr_write(CSR_SSTATUS, __sstatus);
> +     csr_write(CSR_VSSTATUS, __vsstatus);
> +
> +     local_irq_restore(flags);
> +
> +     return val;
> +}
> +

This also needs fixups for exceptions, because the guest can race
against the host and modify its page tables concurrently with the
vmexit.  (How effective such a race is depends, of course, on how the
TLB is implemented in hardware, but you need to do the safe thing anyway.)

Paolo

Reply via email to