The new fault_gva() callback is needed by kvm_mmu_page_fault() to
create synthetic page faults.

Code originally written by Mihai Donțu and Nicușor Cîțu:
https://lore.kernel.org/r/20211006173113.26445-18-ala...@bitdefender.com
Rename fault_gla() to fault_gva() and use the new
EPT_VIOLATION_GVA_IS_VALID flag.

Cc: Borislav Petkov <b...@alien8.de>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Kees Cook <keesc...@chromium.org>
Cc: Madhavan T. Venkataraman <madve...@linux.microsoft.com>
Cc: Paolo Bonzini <pbonz...@redhat.com>
Cc: Sean Christopherson <sea...@google.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Vitaly Kuznetsov <vkuzn...@redhat.com>
Cc: Wanpeng Li <wanpen...@tencent.com>
Co-developed-by: Mihai Donțu <mdo...@bitdefender.com>
Signed-off-by: Mihai Donțu <mdo...@bitdefender.com>
Co-developed-by: Nicușor Cîțu <nicu.c...@icloud.com>
Signed-off-by: Nicușor Cîțu <nicu.c...@icloud.com>
Signed-off-by: Mickaël Salaün <m...@digikod.net>
Link: https://lore.kernel.org/r/20230505152046.6575-2-...@digikod.net
---
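Note for reviewers: below is a minimal sketch (not part of this patch)
of how a later change could consume the new op from
kvm_mmu_page_fault(). The kvm_get_fault_gva() wrapper is hypothetical;
the static call name kvm_x86_fault_gva follows from the
KVM_X86_OP(fault_gva) declaration added by this patch:

	/*
	 * Hypothetical helper (illustration only): return the guest
	 * virtual address of the current fault, or ~0ull when no valid
	 * GVA is available (e.g. an EPT violation without
	 * EPT_VIOLATION_GVA_IS_VALID set).
	 */
	static u64 kvm_get_fault_gva(struct kvm_vcpu *vcpu)
	{
		return static_call(kvm_x86_fault_gva)(vcpu);
	}
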
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  2 ++
 arch/x86/kvm/svm/svm.c             |  9 +++++++++
 arch/x86/kvm/vmx/vmx.c             | 10 ++++++++++
 4 files changed, 22 insertions(+)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index abccd51dcfca..b761182a9444 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -131,6 +131,7 @@ KVM_X86_OP(msr_filter_changed)
 KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
+KVM_X86_OP(fault_gva)
 
 #undef KVM_X86_OP
 #undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6aaae18f1854..f319bcdeb8bd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1706,6 +1706,8 @@ struct kvm_x86_ops {
         * Returns vCPU specific APICv inhibit reasons
         */
        unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
+
+       u64 (*fault_gva)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9a194aa1a75a..8b47b38aaf7f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4700,6 +4700,13 @@ static int svm_vm_init(struct kvm *kvm)
        return 0;
 }
 
+static u64 svm_fault_gva(struct kvm_vcpu *vcpu)
+{
+       const struct vcpu_svm *svm = to_svm(vcpu);
+
+       return svm->vcpu.arch.cr2 ? svm->vcpu.arch.cr2 : ~0ull;
+}
+
 static struct kvm_x86_ops svm_x86_ops __initdata = {
        .name = "kvm_amd",
 
@@ -4826,6 +4833,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
        .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
        .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
+
+       .fault_gva = svm_fault_gva,
 };
 
 /*
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 7eec0226d56a..9870db887a62 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8067,6 +8067,14 @@ static void vmx_vm_destroy(struct kvm *kvm)
        free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
 }
 
+static u64 vmx_fault_gva(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
+               return vmcs_readl(GUEST_LINEAR_ADDRESS);
+
+       return ~0ull;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .name = "kvm_intel",
 
@@ -8204,6 +8212,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .complete_emulated_msr = kvm_complete_insn_gp,
 
        .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
+
+       .fault_gva = vmx_fault_gva,
 };
 
 static unsigned int vmx_handle_intel_pt_intr(void)
-- 
2.40.1

