Now that gvt_pin_guest_page() explicitly verifies the pinned PFN is a
transparent hugepage, don't use KVM's gfn_to_pfn() to pre-check whether
a 2MB GTT entry is possible; instead, just try to map the GFN with a 2MB
entry.  Using KVM to query a PFN that is ultimately managed through VFIO
is odd, and KVM's gfn_to_pfn() is not intended for non-KVM consumption;
it's exported only because of KVM vendor modules (x86 and PPC).

Signed-off-by: Sean Christopherson <sea...@google.com>
---
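For reviewers skimming the diff, a condensed view of the resulting 2M path
in ppgtt_populate_shadow_entry(), pieced together from the hunks below (the
helper name and its return-value convention come from this patch; the
surrounding cases and the rest of the function are elided):

        case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
                ret = try_map_2MB_gtt_entry(vgpu, ge, &dma_addr);
                if (ret == 0)
                        /* 2MB mapping not possible, shadow with 4KB PTEs */
                        return split_2MB_gtt_entry(vgpu, spt, index, &se);
                else if (ret < 0)
                        return ret;
                /* 2MB DMA mapping created, install it as the shadow entry */
                goto set_shadow_entry;
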
 drivers/gpu/drm/i915/gvt/gtt.c | 33 +++++++++++----------------------
 1 file changed, 11 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 9936f8bd19af..59ba6639e622 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1167,21 +1167,19 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 }
 
 /*
- * Check if can do 2M page
+ * Try to map a 2M gtt entry.
  * @vgpu: target vgpu
  * @entry: target pfn's gtt entry
  *
- * Return 1 if 2MB huge gtt shadowing is possible, 0 if miscondition,
- * negative if found err.
+ * Return 1 if a 2MB huge gtt shadow entry was created, 0 if the entry needs
+ * to be split, negative if an error was encountered.
  */
-static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
-       struct intel_gvt_gtt_entry *entry)
+static int try_map_2MB_gtt_entry(struct intel_vgpu *vgpu,
+       struct intel_gvt_gtt_entry *entry, dma_addr_t *dma_addr)
 {
        const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        unsigned long gfn = ops->get_pfn(entry);
-       kvm_pfn_t pfn;
        int max_level;
-       int ret;
 
        if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
                return 0;
@@ -1194,16 +1192,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
        if (max_level < PG_LEVEL_2M)
                return 0;
 
-       pfn = gfn_to_pfn(vgpu->vfio_device.kvm, gfn);
-       if (is_error_noslot_pfn(pfn))
-               return -EINVAL;
-
-       if (!pfn_valid(pfn))
-               return -EINVAL;
-
-       ret = PageTransHuge(pfn_to_page(pfn));
-       kvm_release_pfn_clean(pfn);
-       return ret;
+       return intel_gvt_dma_map_guest_page(vgpu, gfn, I915_GTT_PAGE_SIZE_2M, dma_addr);
 }
 
 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
@@ -1290,7 +1279,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 {
        const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry se = *ge;
-       unsigned long gfn, page_size = PAGE_SIZE;
+       unsigned long gfn;
        dma_addr_t dma_addr;
        int ret;
 
@@ -1313,13 +1302,12 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
                return split_64KB_gtt_entry(vgpu, spt, index, &se);
        case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
                gvt_vdbg_mm("shadow 2M gtt entry\n");
-               ret = is_2MB_gtt_possible(vgpu, ge);
+               ret = try_map_2MB_gtt_entry(vgpu, ge, &dma_addr);
                if (ret == 0)
                        return split_2MB_gtt_entry(vgpu, spt, index, &se);
                else if (ret < 0)
                        return ret;
-               page_size = I915_GTT_PAGE_SIZE_2M;
-               break;
+               goto set_shadow_entry;
        case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
                gvt_vgpu_err("GVT doesn't support 1GB entry\n");
                return -EINVAL;
@@ -1328,10 +1316,11 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
        }
 
        /* direct shadow */
-       ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
+       ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr);
        if (ret)
                return -ENXIO;
 
+set_shadow_entry:
        pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
        ppgtt_set_shadow_entry(spt, &se, index);
        return 0;
-- 
2.39.0.314.g84b9a713c41-goog
