Fold tdx_sept_drop_private_spte() into tdx_sept_remove_private_spte() to
avoid having to differentiate between "zap", "drop", and "remove", and to
eliminate dead code due to redundant checks, e.g. on an HKID being
assigned.
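
For illustration, the removal path now reads top to bottom as a single
flow, roughly (a sketch with non-essential details elided; see the diff
for the exact code):

  static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
                                          enum pg_level level, kvm_pfn_t pfn)
  {
          /* Sanity checks: HKID still assigned, 4K mappings only. */

          ret = tdx_sept_zap_private_spte(kvm, gfn, level, page);
          if (ret <= 0)
                  return ret;

          tdx_track(kvm);

          /* TDH.MEM.PAGE.REMOVE, retried once with TDH.VP.ENTER blocked. */
          /* TDH.PHYMEM.PAGE.WBINVD, then tdx_quirk_reset_page(). */
  }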

No functional change intended.

Reviewed-by: Binbin Wu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
---
 arch/x86/kvm/vmx/tdx.c | 90 +++++++++++++++++++-----------------------
 1 file changed, 40 insertions(+), 50 deletions(-)

diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index c242d73b6a7b..abea9b3d08cf 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1648,55 +1648,6 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
        return tdx_mem_page_record_premap_cnt(kvm, gfn, level, pfn);
 }
 
-static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
-                                     enum pg_level level, struct page *page)
-{
-       int tdx_level = pg_level_to_tdx_sept_level(level);
-       struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
-       gpa_t gpa = gfn_to_gpa(gfn);
-       u64 err, entry, level_state;
-
-       /* TODO: handle large pages. */
-       if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
-               return -EIO;
-
-       if (KVM_BUG_ON(!is_hkid_assigned(kvm_tdx), kvm))
-               return -EIO;
-
-       /*
-        * When zapping private page, write lock is held. So no race condition
-        * with other vcpu sept operation.
-        * Race with TDH.VP.ENTER due to (0-step mitigation) and Guest TDCALLs.
-        */
-       err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
-                                 &level_state);
-
-       if (unlikely(tdx_operand_busy(err))) {
-               /*
-                * The second retry is expected to succeed after kicking off all
-                * other vCPUs and prevent them from invoking TDH.VP.ENTER.
-                */
-               tdx_no_vcpus_enter_start(kvm);
-               err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
-                                         &level_state);
-               tdx_no_vcpus_enter_stop(kvm);
-       }
-
-       if (KVM_BUG_ON(err, kvm)) {
-               pr_tdx_error_2(TDH_MEM_PAGE_REMOVE, err, entry, level_state);
-               return -EIO;
-       }
-
-       err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
-
-       if (KVM_BUG_ON(err, kvm)) {
-               pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
-               return -EIO;
-       }
-       tdx_quirk_reset_page(page);
-       return 0;
-}
-
 static int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
                                     enum pg_level level, void *private_spt)
 {
@@ -1858,7 +1809,11 @@ static int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
 static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
                                        enum pg_level level, kvm_pfn_t pfn)
 {
+       int tdx_level = pg_level_to_tdx_sept_level(level);
+       struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
        struct page *page = pfn_to_page(pfn);
+       gpa_t gpa = gfn_to_gpa(gfn);
+       u64 err, entry, level_state;
        int ret;
 
        /*
@@ -1869,6 +1824,10 @@ static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
        if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
                return -EIO;
 
+       /* TODO: handle large pages. */
+       if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
+               return -EIO;
+
        ret = tdx_sept_zap_private_spte(kvm, gfn, level, page);
        if (ret <= 0)
                return ret;
@@ -1879,7 +1838,38 @@ static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
         */
        tdx_track(kvm);
 
-       return tdx_sept_drop_private_spte(kvm, gfn, level, page);
+       /*
+        * When zapping a private page, the write lock is held, so there is
+        * no race with other vCPU S-EPT operations.  TDH.MEM.PAGE.REMOVE can
+        * still race with TDH.VP.ENTER (0-step mitigation) and guest TDCALLs.
+        */
+       err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
+                                 &level_state);
+
+       if (unlikely(tdx_operand_busy(err))) {
+               /*
+                * The retry is expected to succeed once all other vCPUs have
+                * been kicked out and prevented from invoking TDH.VP.ENTER.
+                */
+               tdx_no_vcpus_enter_start(kvm);
+               err = tdh_mem_page_remove(&kvm_tdx->td, gpa, tdx_level, &entry,
+                                         &level_state);
+               tdx_no_vcpus_enter_stop(kvm);
+       }
+
+       if (KVM_BUG_ON(err, kvm)) {
+               pr_tdx_error_2(TDH_MEM_PAGE_REMOVE, err, entry, level_state);
+               return -EIO;
+       }
+
+       err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
+       if (KVM_BUG_ON(err, kvm)) {
+               pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
+               return -EIO;
+       }
+
+       tdx_quirk_reset_page(page);
+       return 0;
 }
 
 void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
-- 
2.51.0.858.gf9c4a03a3a-goog