Remove set_exception_intercept and clr_exception_intercept.
Replace these calls with the generic svm_set_intercept and svm_clr_intercept.

Signed-off-by: Babu Moger <babu.moger@amd.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/kvm/svm/svm.c |   20 ++++++++++----------
 arch/x86/kvm/svm/svm.h |   18 ------------------
 2 files changed, 10 insertions(+), 28 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0d7397f4a4f7..96617b61e531 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1003,11 +1003,11 @@ static void init_vmcb(struct vcpu_svm *svm)
 
        set_dr_intercepts(svm);
 
-       set_exception_intercept(svm, INTERCEPT_PF_VECTOR);
-       set_exception_intercept(svm, INTERCEPT_UD_VECTOR);
-       set_exception_intercept(svm, INTERCEPT_MC_VECTOR);
-       set_exception_intercept(svm, INTERCEPT_AC_VECTOR);
-       set_exception_intercept(svm, INTERCEPT_DB_VECTOR);
+       svm_set_intercept(svm, INTERCEPT_PF_VECTOR);
+       svm_set_intercept(svm, INTERCEPT_UD_VECTOR);
+       svm_set_intercept(svm, INTERCEPT_MC_VECTOR);
+       svm_set_intercept(svm, INTERCEPT_AC_VECTOR);
+       svm_set_intercept(svm, INTERCEPT_DB_VECTOR);
        /*
         * Guest access to VMware backdoor ports could legitimately
         * trigger #GP because of TSS I/O permission bitmap.
@@ -1015,7 +1015,7 @@ static void init_vmcb(struct vcpu_svm *svm)
         * as VMware does.
         */
        if (enable_vmware_backdoor)
-               set_exception_intercept(svm, INTERCEPT_GP_VECTOR);
+               svm_set_intercept(svm, INTERCEPT_GP_VECTOR);
 
        svm_set_intercept(svm, INTERCEPT_INTR);
        svm_set_intercept(svm, INTERCEPT_NMI);
@@ -1093,7 +1093,7 @@ static void init_vmcb(struct vcpu_svm *svm)
                /* Setup VMCB for Nested Paging */
                control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
                svm_clr_intercept(svm, INTERCEPT_INVLPG);
-               clr_exception_intercept(svm, INTERCEPT_PF_VECTOR);
+               svm_clr_intercept(svm, INTERCEPT_PF_VECTOR);
                svm_clr_intercept(svm, INTERCEPT_CR3_READ);
                svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
                save->g_pat = svm->vcpu.arch.pat;
@@ -1135,7 +1135,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 
        if (sev_guest(svm->vcpu.kvm)) {
                svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
-               clr_exception_intercept(svm, INTERCEPT_UD_VECTOR);
+               svm_clr_intercept(svm, INTERCEPT_UD_VECTOR);
        }
 
        vmcb_mark_all_dirty(svm->vmcb);
@@ -1646,11 +1646,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       clr_exception_intercept(svm, INTERCEPT_BP_VECTOR);
+       svm_clr_intercept(svm, INTERCEPT_BP_VECTOR);
 
        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
-                       set_exception_intercept(svm, INTERCEPT_BP_VECTOR);
+                       svm_set_intercept(svm, INTERCEPT_BP_VECTOR);
        }
 }
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 8128bac75fa2..fc4bfea3f555 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -261,24 +261,6 @@ static inline void clr_dr_intercepts(struct vcpu_svm *svm)
        recalc_intercepts(svm);
 }
 
-static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
-{
-       struct vmcb *vmcb = get_host_vmcb(svm);
-
-       vmcb_set_intercept(&vmcb->control, bit);
-
-       recalc_intercepts(svm);
-}
-
-static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
-{
-       struct vmcb *vmcb = get_host_vmcb(svm);
-
-       vmcb_clr_intercept(&vmcb->control, bit);
-
-       recalc_intercepts(svm);
-}
-
 static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
 {
        struct vmcb *vmcb = get_host_vmcb(svm);

Reply via email to