Currently, we are being fairly lazy and only use a wmb() following an
update to an active batch. We have previously found that to be
insufficient to ensure that a write from the CPU reaches memory in a
timely fashion, and in some cases we may also need to flush a chipset
cache. To that end, we have i915_gem_chipset_flush(), so use it.
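
For reference, the pattern being changed looks roughly like this (a
sketch based on the hunks below, not verbatim selftest code; i915 and
obj stand in for the selftest's device and batch object):

	u32 *cmd;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = MI_BATCH_BUFFER_END;

	/* Before: wmb() only orders the CPU write; it does not
	 * guarantee the write is pushed out past any chipset cache
	 * in time for the GPU to see it.
	 * After: flush through the chipset so the GPU observes the
	 * updated batch.
	 */
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);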

Suggested-by: Mika Kuoppala <mika.kuopp...@linux.intel.com>
Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuopp...@linux.intel.com>
---
 drivers/gpu/drm/i915/selftests/i915_gem_request.c | 10 +++++++---
 drivers/gpu/drm/i915/selftests/intel_hangcheck.c  |  7 +++++--
 2 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index 6664cb2eb0b8..78b9f811707f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -418,7 +418,10 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
                err = PTR_ERR(cmd);
                goto err;
        }
+
        *cmd = MI_BATCH_BUFFER_END;
+       i915_gem_chipset_flush(i915);
+
        i915_gem_object_unpin_map(obj);
 
        err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -605,8 +608,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
                *cmd++ = lower_32_bits(vma->node.start);
        }
        *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
+       i915_gem_chipset_flush(i915);
 
-       wmb();
        i915_gem_object_unpin_map(obj);
 
        return vma;
@@ -625,7 +628,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
                return PTR_ERR(cmd);
 
        *cmd = MI_BATCH_BUFFER_END;
-       wmb();
+       i915_gem_chipset_flush(batch->vm->i915);
 
        i915_gem_object_unpin_map(batch->obj);
 
@@ -858,7 +861,8 @@ static int live_sequential_engines(void *arg)
                                              I915_MAP_WC);
                if (!IS_ERR(cmd)) {
                        *cmd = MI_BATCH_BUFFER_END;
-                       wmb();
+                       i915_gem_chipset_flush(i915);
+
                        i915_gem_object_unpin_map(request[id]->batch->obj);
                }
 
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 377c1de766ce..08159b268893 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -165,6 +165,7 @@ static int emit_recurse_batch(struct hang *h,
                *batch++ = lower_32_bits(vma->node.start);
        }
        *batch++ = MI_BATCH_BUFFER_END; /* not reached */
+       i915_gem_chipset_flush(h->i915);
 
        flags = 0;
        if (INTEL_GEN(vm->i915) <= 5)
@@ -231,7 +232,7 @@ static u32 hws_seqno(const struct hang *h,
 static void hang_fini(struct hang *h)
 {
        *h->batch = MI_BATCH_BUFFER_END;
-       wmb();
+       i915_gem_chipset_flush(h->i915);
 
        i915_gem_object_unpin_map(h->obj);
        i915_gem_object_put(h->obj);
@@ -275,6 +276,8 @@ static int igt_hang_sanitycheck(void *arg)
                i915_gem_request_get(rq);
 
                *h.batch = MI_BATCH_BUFFER_END;
+               i915_gem_chipset_flush(i915);
+
                __i915_add_request(rq, true);
 
                timeout = i915_wait_request(rq,
@@ -765,7 +768,7 @@ static int igt_reset_queue(void *arg)
                pr_info("%s: Completed %d resets\n", engine->name, count);
 
                *h.batch = MI_BATCH_BUFFER_END;
-               wmb();
+               i915_gem_chipset_flush(i915);
 
                i915_gem_request_put(prev);
        }
-- 
2.14.1
