The purpose of returning the just-pinned VMA is so that we can use the
information within it, such as its address. The VMA should also be
tracked and used as the cookie for the later unpin.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Reviewed-by: Zhenyu Wang <zhen...@linux.intel.com>
---
 drivers/gpu/drm/i915/gvt/execlist.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index a9d04c378755..cfdd3ae13fb0 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -385,8 +385,6 @@ static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
 static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
        int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
-       struct i915_vma *vma;
-       unsigned long gma;
 
        /* pin the gem object to ggtt */
        if (!list_empty(&workload->shadow_bb)) {
@@ -398,8 +396,10 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
                list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
                                list) {
+                       struct i915_vma *vma;
+
                        vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
-                                       0, 0);
+                                                      4, 0);
                        if (IS_ERR(vma)) {
                                gvt_err("Cannot pin\n");
                                return;
@@ -407,9 +407,9 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
                        i915_gem_object_unpin_pages(entry_obj->obj);
 
                        /* update the relocate gma with shadow batch buffer*/
-                       gma = i915_gem_object_ggtt_offset(entry_obj->obj, NULL);
-                       WARN_ON(!IS_ALIGNED(gma, 4));
-                       set_gma_to_bb_cmd(entry_obj, gma, gmadr_bytes);
+                       set_gma_to_bb_cmd(entry_obj,
+                                         i915_ggtt_offset(vma),
+                                         gmadr_bytes);
                }
        }
 }
@@ -441,7 +441,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
        struct i915_vma *vma;
-       unsigned long gma;
        unsigned char *per_ctx_va =
                (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
                wa_ctx->indirect_ctx.size;
@@ -449,16 +448,15 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        if (wa_ctx->indirect_ctx.size == 0)
                return;
 
-       vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 0, 0, 0);
+       vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
+                                      0, CACHELINE_BYTES, 0);
        if (IS_ERR(vma)) {
                gvt_err("Cannot pin indirect ctx obj\n");
                return;
        }
        i915_gem_object_unpin_pages(wa_ctx->indirect_ctx.obj);
 
-       gma = i915_gem_object_ggtt_offset(wa_ctx->indirect_ctx.obj, NULL);
-       WARN_ON(!IS_ALIGNED(gma, CACHELINE_BYTES));
-       wa_ctx->indirect_ctx.shadow_gma = gma;
+       wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
 
        wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
        memset(per_ctx_va, 0, CACHELINE_BYTES);
-- 
2.9.3

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to