Peter Hornyack <peterhorny...@google.com> writes:

> Set the vm's unhandled_msr_exits flag when user space calls the
> KVM_ENABLE_CAP ioctl with KVM_CAP_UNHANDLED_MSR_EXITS. After kvm fails
> to handle a guest rdmsr or wrmsr, check this flag and exit to user space
> with KVM_EXIT_MSR rather than immediately injecting a GP fault.
>
> On reentry into kvm, use the complete_userspace_io callback path to call
> vmx_complete_userspace_msr. Complete the MSR access if user space was
> able to handle it successfully, or fail the MSR access and inject a GP
> fault if user space could not handle the access.
>
...
> +static int vmx_complete_userspace_msr(struct kvm_vcpu *vcpu)
> +{
> +     struct msr_data msr;
> +
> +     if (vcpu->run->msr.index != vcpu->arch.regs[VCPU_REGS_RCX]) {
> +             pr_debug("msr.index 0x%x changed, does not match ecx 0x%lx\n",
> +                      vcpu->run->msr.index,
> +                      vcpu->arch.regs[VCPU_REGS_RCX]);
> +             goto err_out;
> +     }
> +     msr.index = vcpu->run->msr.index;
> +     msr.data = vcpu->run->msr.data;
> +     msr.host_initiated = false;
> +
> +     switch (vcpu->run->msr.direction) {
> +     case KVM_EXIT_MSR_RDMSR:
> +             if (vcpu->run->msr.handled == KVM_EXIT_MSR_HANDLED)
> +                     complete_rdmsr(vcpu, &msr);
> +             else
> +                     fail_rdmsr(vcpu, &msr);
> +             break;

Should we check for MSR_UNHANDLED? It could be "debugging relief" if userspace is
filling it up with random crap! I think it should be OK if the trace function
catches that too.

> +     case KVM_EXIT_MSR_WRMSR:
> +             if (vcpu->run->msr.handled == KVM_EXIT_MSR_HANDLED)
> +                     complete_wrmsr(vcpu, &msr);
> +             else
> +                     fail_wrmsr(vcpu, &msr);
> +             break;
> +     default:
> +             pr_debug("bad msr.direction %u\n", vcpu->run->msr.direction);
> +             goto err_out;
> +     }
> +
> +     return 1;
> +err_out:
> +     vcpu->run->exit_reason = KVM_EXIT_MSR;
> +     vcpu->run->msr.direction = KVM_EXIT_MSR_COMPLETION_FAILED;
> +     return 0;
> +}
> +
> +/*
> + * Returns 1 if the rdmsr handling is complete; returns 0 if kvm should exit to
> + * user space to handle this rdmsr.
> + */
>  static int handle_rdmsr(struct kvm_vcpu *vcpu)
>  {
>       u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
> @@ -5499,6 +5547,16 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
>       msr_info.index = ecx;
>       msr_info.host_initiated = false;
>       if (vmx_get_msr(vcpu, &msr_info)) {
> +             if (vcpu->kvm->arch.unhandled_msr_exits) {
> +                     vcpu->run->exit_reason = KVM_EXIT_MSR;
> +                     vcpu->run->msr.direction = KVM_EXIT_MSR_RDMSR;
> +                     vcpu->run->msr.index = msr_info.index;
> +                     vcpu->run->msr.data = 0;
> +                     vcpu->run->msr.handled = 0;
> +                     vcpu->arch.complete_userspace_io =
> +                                     vmx_complete_userspace_msr;
> +                     return 0;
> +             }
>               fail_rdmsr(vcpu, &msr_info);
>               return 1;
>       }
> @@ -5508,6 +5566,10 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
>       return 1;
>  }
>  
> +/*
> + * Returns 1 if the wrmsr handling is complete; returns 0 if kvm should exit to
> + * user space to handle this wrmsr.
> + */
>  static int handle_wrmsr(struct kvm_vcpu *vcpu)
>  {
>       struct msr_data msr;
> @@ -5519,6 +5581,16 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
>       msr.index = ecx;
>       msr.host_initiated = false;
>       if (kvm_set_msr(vcpu, &msr) != 0) {
> +             if (vcpu->kvm->arch.unhandled_msr_exits) {

I think kvm should ultimately decide which msrs it can let userspace
handle even if userspace has explicitly enabled the ioctl.

Bandan

> +                     vcpu->run->exit_reason = KVM_EXIT_MSR;
> +                     vcpu->run->msr.direction = KVM_EXIT_MSR_WRMSR;
> +                     vcpu->run->msr.index = msr.index;
> +                     vcpu->run->msr.data = msr.data;
> +                     vcpu->run->msr.handled = 0;
> +                     vcpu->arch.complete_userspace_io =
> +                                     vmx_complete_userspace_msr;
> +                     return 0;
> +             }
>               fail_wrmsr(vcpu, &msr);
>               return 1;
>       }
> @@ -8163,7 +8235,7 @@ static bool vmx_xsaves_supported(void)
>  
>  static bool vmx_msr_exits_supported(void)
>  {
> -     return false;
> +     return true;
>  }
>  
>  static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 4bbc2a1676c9..5c22f4655741 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2461,6 +2461,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
>       case KVM_CAP_ENABLE_CAP_VM:
>       case KVM_CAP_DISABLE_QUIRKS:
>       case KVM_CAP_SET_BOOT_CPU_ID:
> +     case KVM_CAP_UNHANDLED_MSR_EXITS:
>  #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
>       case KVM_CAP_ASSIGN_DEV_IRQ:
>       case KVM_CAP_PCI_2_3:
> @@ -3568,6 +3569,17 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
>               kvm->arch.disabled_quirks = cap->args[0];
>               r = 0;
>               break;
> +     case KVM_CAP_UNHANDLED_MSR_EXITS:
> +             if (kvm_x86_ops->msr_exits_supported()) {
> +                     if (cap->args[0])
> +                             kvm->arch.unhandled_msr_exits = true;
> +                     else
> +                             kvm->arch.unhandled_msr_exits = false;
> +                     r = 0;
> +             } else {
> +                     r = -EINVAL;
> +             }
> +             break;
>       default:
>               r = -EINVAL;
>               break;
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to