If intel_gvt_dma_map_guest_page fails, it will call
ppgtt_invalidate_spt, which will finally free the spt.
But the caller does not notice that, and it will free the spt again in its
error path, resulting in a double free.

Fix this by splitting the invalidate and free steps of
ppgtt_invalidate_spt into separate functions, and only freeing the spt
in the success case.

Reported-by: Zheng Wang <hackerzheng...@gmail.com>
Signed-off-by: Zheng Wang <zyytlz...@163.com>
---
v2:
- split the initial function into two API functions, as suggested by Greg

v1: https://lore.kernel.org/all/20220928033340.1063949-1-zyytlz...@163.com/
---
 drivers/gpu/drm/i915/gvt/gtt.c | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index ce0eb03709c3..55d8e1419302 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -959,6 +959,7 @@ static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt 
*spt)
        return atomic_dec_return(&spt->refcount);
 }
 
+static int  ppgtt_invalidate_and_free_spt(struct intel_vgpu_ppgtt_spt *spt);
 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
 
 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
@@ -995,7 +996,7 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct 
intel_vgpu *vgpu,
                                ops->get_pfn(e));
                return -ENXIO;
        }
-       return ppgtt_invalidate_spt(s);
+       return ppgtt_invalidate_and_free_spt(s);
 }
 
 static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
@@ -1016,18 +1017,31 @@ static inline void ppgtt_invalidate_pte(struct 
intel_vgpu_ppgtt_spt *spt,
        intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
 }
 
-static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
+static int  ppgtt_invalidate_and_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
        struct intel_vgpu *vgpu = spt->vgpu;
-       struct intel_gvt_gtt_entry e;
-       unsigned long index;
        int ret;
 
        trace_spt_change(spt->vgpu->id, "die", spt,
-                       spt->guest_page.gfn, spt->shadow_page.type);
-
+               spt->guest_page.gfn, spt->shadow_page.type);
        if (ppgtt_put_spt(spt) > 0)
                return 0;
+       ret = ppgtt_invalidate_spt(spt);
+       if (!ret) {
+               trace_spt_change(spt->vgpu->id, "release", spt,
+                        spt->guest_page.gfn, spt->shadow_page.type);
+               ppgtt_free_spt(spt);
+       }
+
+       return ret;
+}
+
+static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
+{
+       struct intel_vgpu *vgpu = spt->vgpu;
+       struct intel_gvt_gtt_entry e;
+       unsigned long index;
+       int ret;
 
        for_each_present_shadow_entry(spt, &e, index) {
                switch (e.type) {
@@ -1059,9 +1073,6 @@ static int ppgtt_invalidate_spt(struct 
intel_vgpu_ppgtt_spt *spt)
                }
        }
 
-       trace_spt_change(spt->vgpu->id, "release", spt,
-                        spt->guest_page.gfn, spt->shadow_page.type);
-       ppgtt_free_spt(spt);
        return 0;
 fail:
        gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
@@ -1393,7 +1404,7 @@ static int ppgtt_handle_guest_entry_removal(struct 
intel_vgpu_ppgtt_spt *spt,
                        ret = -ENXIO;
                        goto fail;
                }
-               ret = ppgtt_invalidate_spt(s);
+               ret = ppgtt_invalidate_and_free_spt(s);
                if (ret)
                        goto fail;
        } else {
-- 
2.25.1

Reply via email to