From: John Harrison <john.c.harri...@intel.com>

Now that everything above has been converted to use request structures, the
lower level move_to_active() functions can be updated to be request-based as
well.
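
For reference, call sites no longer pass an engine (or fish one out of the
request); they hand over the request itself and the helper derives the engine
internally. A minimal before/after sketch, simplified from the call sites in
the diff below:

        /* before: the caller extracts the engine from the request */
        i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req->ring);

        /* after: the caller passes the request; the helper recovers the
         * engine via i915_gem_request_get_ring(req) */
        i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);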

For: VIZ-5115
Signed-off-by: John Harrison <john.c.harri...@intel.com>
Reviewed-by: Tomas Elf <tomas....@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h              |    2 +-
 drivers/gpu/drm/i915/i915_gem.c              |   17 ++++++++---------
 drivers/gpu/drm/i915/i915_gem_context.c      |    2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |    2 +-
 drivers/gpu/drm/i915/i915_gem_render_state.c |    2 +-
 drivers/gpu/drm/i915/intel_lrc.c             |    2 +-
 6 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7e0a095..3d50f41 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2668,7 +2668,7 @@ int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                         struct intel_engine_cs *to,
                         struct drm_i915_gem_request **to_req);
 void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct intel_engine_cs *ring);
+                            struct drm_i915_gem_request *req);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9f615bf..d8f4f3d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2180,17 +2180,16 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
 static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-                              struct intel_engine_cs *ring)
+                              struct drm_i915_gem_request *req)
 {
-       struct drm_i915_gem_request *req;
-       struct intel_engine_cs *old_ring;
+       struct intel_engine_cs *new_ring, *old_ring;
 
-       BUG_ON(ring == NULL);
+       BUG_ON(req == NULL);
 
-       req = intel_ring_get_request(ring);
+       new_ring = i915_gem_request_get_ring(req);
        old_ring = i915_gem_request_get_ring(obj->last_read_req);
 
-       if (old_ring != ring && obj->last_write_req) {
+       if (old_ring != new_ring && obj->last_write_req) {
                /* Keep the request relative to the current ring */
                i915_gem_request_assign(&obj->last_write_req, req);
        }
@@ -2201,16 +2200,16 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                obj->active = 1;
        }
 
-       list_move_tail(&obj->ring_list, &ring->active_list);
+       list_move_tail(&obj->ring_list, &new_ring->active_list);
 
        i915_gem_request_assign(&obj->last_read_req, req);
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct intel_engine_cs *ring)
+                            struct drm_i915_gem_request *req)
 {
        list_move_tail(&vma->mm_list, &vma->vm->active_list);
-       return i915_gem_object_move_to_active(vma->obj, ring);
+       return i915_gem_object_move_to_active(vma->obj, req);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 75b9d78..815b691 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -656,7 +656,7 @@ static int do_switch(struct drm_i915_gem_request *req)
         */
        if (from != NULL) {
                from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index af4718d..fac5966 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1030,7 +1030,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                        obj->base.pending_read_domains |= obj->base.read_domains;
                obj->base.read_domains = obj->base.pending_read_domains;
 
-               i915_vma_move_to_active(vma, ring);
+               i915_vma_move_to_active(vma, req);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        i915_gem_request_assign(&obj->last_write_req, req);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 6598f9b..e04cda4 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -171,7 +171,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
        if (ret)
                goto out;
 
-       i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req->ring);
+       i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
 out:
        i915_gem_render_state_fini(&so);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c7c760d..5481514 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1344,7 +1344,7 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
        if (ret)
                goto out;
 
-       i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req->ring);
+       i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
 out:
        i915_gem_render_state_fini(&so);
-- 
1.7.9.5
