From: John Harrison <john.c.harri...@intel.com>

In order to explicitly track all GPU work (and completely remove the outstanding
lazy request), it is necessary to add extra i915_add_request() calls in various
places. Some of these callers do not need the implicit cache flush that is done
as part of the standard batch buffer submission process.

This patch adds a flag to __i915_add_request() to specify whether or not the
cache flush is required.
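
A minimal usage sketch, for illustration only: the caller below and its command
emission are hypothetical, only the i915_add_request() and
i915_add_request_no_flush() macros come from this patch.

	static int example_submit(struct intel_engine_cs *ring)
	{
		int ret;

		/* ... emit commands that rely on the implicit cache flush ... */
		ret = i915_add_request(ring);		/* flush_caches = true */
		if (ret)
			return ret;

		/* ... emit commands that handle their own flushing ... */
		ret = i915_add_request_no_flush(ring);	/* flush_caches = false */
		return ret;
	}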

For: VIZ-5115
Signed-off-by: John Harrison <john.c.harri...@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h              |    7 +++++--
 drivers/gpu/drm/i915/i915_gem.c              |   25 +++++++++++--------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |    2 +-
 drivers/gpu/drm/i915/i915_gem_render_state.c |    2 +-
 drivers/gpu/drm/i915/intel_lrc.c             |    2 +-
 5 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 21a2b35..5c87876 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2815,9 +2815,12 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 int __i915_add_request(struct intel_engine_cs *ring,
                       struct drm_file *file,
-                      struct drm_i915_gem_object *batch_obj);
+                      struct drm_i915_gem_object *batch_obj,
+                      bool flush_caches);
 #define i915_add_request(ring) \
-       __i915_add_request(ring, NULL, NULL)
+       __i915_add_request(ring, NULL, NULL, true)
+#define i915_add_request_no_flush(ring) \
+       __i915_add_request(ring, NULL, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
                        unsigned reset_counter,
                        bool interruptible,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9546992..96f9155 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2408,7 +2408,8 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 
 int __i915_add_request(struct intel_engine_cs *ring,
                       struct drm_file *file,
-                      struct drm_i915_gem_object *obj)
+                      struct drm_i915_gem_object *obj,
+                      bool flush_caches)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
@@ -2433,12 +2434,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
-       if (i915.enable_execlists) {
-               ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
-               if (ret)
-                       return ret;
-       } else {
-               ret = intel_ring_flush_all_caches(ring);
+       if (flush_caches) {
+               if (i915.enable_execlists)
+                       ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
+               else
+                       ret = intel_ring_flush_all_caches(ring);
                if (ret)
                        return ret;
        }
@@ -2450,15 +2450,12 @@ int __i915_add_request(struct intel_engine_cs *ring,
         */
        request->postfix = intel_ring_get_tail(ringbuf);
 
-       if (i915.enable_execlists) {
+       if (i915.enable_execlists)
                ret = ring->emit_request(ringbuf, request);
-               if (ret)
-                       return ret;
-       } else {
+       else
                ret = ring->add_request(ring);
-               if (ret)
-                       return ret;
-       }
+       if (ret)
+               return ret;
 
        request->head = request_start;
        request->tail = intel_ring_get_tail(ringbuf);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f7c19bc..76f6dcf 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -997,7 +997,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 
        /* Add a breadcrumb for the completion of the batch buffer */
        return __i915_add_request(params->ring, params->file,
-                                 params->batch_obj);
+                                 params->batch_obj, true);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 521548a..aba39c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -173,7 +173,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
 
        i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-       ret = __i915_add_request(ring, NULL, so.obj);
+       ret = __i915_add_request(ring, NULL, so.obj, true);
        /* __i915_add_request moves object to inactive if it fails */
 out:
        i915_gem_render_state_fini(&so);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 450eed4..318500c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1612,7 +1612,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
 
        i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-       ret = __i915_add_request(ring, file, so.obj);
+       ret = __i915_add_request(ring, file, so.obj, true);
        /* intel_logical_ring_add_request moves object to inactive if it
         * fails */
 out:
-- 
1.7.9.5
