Add a fault_gva() callback to struct kvm_x86_ops. It is needed by
kvm_mmu_page_fault() to create synthetic page faults.
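As a rough illustration (not part of this patch, and the exact call
site is an assumption), a later change would consume the hook through
the static call generated by the KVM_X86_OP(fault_gva) entry below,
along these lines:

	/* Sketch only: fetch the guest virtual address of the
	 * current fault, or ~0ull when none is available. */
	u64 gva = static_call(kvm_x86_fault_gva)(vcpu);

	if (gva == ~0ull)
		return;		/* no usable GVA for this fault */
	/* ... hand gva to the synthetic page fault logic ... */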
Code originally written by Mihai Donțu and Nicușor Cîțu:
https://lore.kernel.org/r/20211006173113.26445-18-ala...@bitdefender.com

Renamed fault_gla() to fault_gva() and used the new
EPT_VIOLATION_GVA_IS_VALID flag.

Cc: Borislav Petkov <b...@alien8.de>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Kees Cook <keesc...@chromium.org>
Cc: Madhavan T. Venkataraman <madve...@linux.microsoft.com>
Cc: Paolo Bonzini <pbonz...@redhat.com>
Cc: Sean Christopherson <sea...@google.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Vitaly Kuznetsov <vkuzn...@redhat.com>
Cc: Wanpeng Li <wanpen...@tencent.com>
Co-developed-by: Mihai Donțu <mdo...@bitdefender.com>
Signed-off-by: Mihai Donțu <mdo...@bitdefender.com>
Co-developed-by: Nicușor Cîțu <nicu.c...@icloud.com>
Signed-off-by: Nicușor Cîțu <nicu.c...@icloud.com>
Signed-off-by: Mickaël Salaün <m...@digikod.net>
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  2 ++
 arch/x86/kvm/svm/svm.c             |  9 +++++++++
 arch/x86/kvm/vmx/vmx.c             | 10 ++++++++++
 4 files changed, 22 insertions(+)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index e3054e3e46d5..ba3db679db2b 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -134,6 +134,7 @@ KVM_X86_OP(msr_filter_changed)
 KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
+KVM_X86_OP(fault_gva)
 
 #undef KVM_X86_OP
 #undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dff10051e9b6..0415dacd4b28 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1750,6 +1750,8 @@ struct kvm_x86_ops {
 	 * Returns vCPU specific APICv inhibit reasons
 	 */
 	unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
+
+	u64 (*fault_gva)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index beea99c8e8e0..d32517a2cf9c 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4906,6 +4906,13 @@ static int svm_vm_init(struct kvm *kvm)
 	return 0;
 }
 
+static u64 svm_fault_gva(struct kvm_vcpu *vcpu)
+{
+	const struct vcpu_svm *svm = to_svm(vcpu);
+
+	return svm->vcpu.arch.cr2 ? svm->vcpu.arch.cr2 : ~0ull;
+}
+
 static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.name = KBUILD_MODNAME,
 
@@ -5037,6 +5044,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
 	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
 	.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
+
+	.fault_gva = svm_fault_gva,
 };
 
 /*
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 1b1581f578b0..a8158bc1dda9 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8233,6 +8233,14 @@ static void vmx_vm_destroy(struct kvm *kvm)
 	free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
 }
 
+static u64 vmx_fault_gva(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
+		return vmcs_readl(GUEST_LINEAR_ADDRESS);
+
+	return ~0ull;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.name = KBUILD_MODNAME,
 
@@ -8373,6 +8381,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.complete_emulated_msr = kvm_complete_insn_gp,
 
 	.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
+
+	.fault_gva = vmx_fault_gva,
 };
 
 static unsigned int vmx_handle_intel_pt_intr(void)
-- 
2.42.1
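For readers less familiar with the VMX side: the
EPT_VIOLATION_GVA_IS_VALID test in vmx_fault_gva() checks bit 7 of the
EPT-violation exit qualification ("guest linear-address field valid"
in the Intel SDM). A standalone userspace sketch of the same decision
logic follows; the constant mirrors the kernel definition, while the
helper name and the sample values are made up for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors EPT_VIOLATION_GVA_IS_VALID: bit 7 of the
	 * EPT-violation exit qualification. */
	#define EPT_VIOLATION_GVA_IS_VALID (1ULL << 7)

	/* Stand-in for vmcs_readl(GUEST_LINEAR_ADDRESS); the
	 * returned address is invented for this example. */
	static uint64_t read_guest_linear_address(void)
	{
		return 0xffff888012345000ULL;
	}

	static uint64_t fault_gva(uint64_t exit_qualification)
	{
		if (exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
			return read_guest_linear_address();

		return ~0ULL;	/* sentinel: no valid GVA */
	}

	int main(void)
	{
		/* GVA-valid bit set: the recorded address is usable. */
		printf("gva=0x%llx\n",
		       (unsigned long long)fault_gva(1ULL << 7));
		/* Bit clear: callers get the ~0 sentinel instead. */
		printf("gva=0x%llx\n",
		       (unsigned long long)fault_gva(0));
		return 0;
	}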