After moving the write flushing into move_to_ring(), we can simplify the
execbuffer flush as we no longer need to compute inter-ring flushes.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   73 +++++++---------------------
 1 files changed, 17 insertions(+), 56 deletions(-)
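
As a reading aid, here is roughly what the flush path in
i915_gem_execbuffer_move_to_gpu() looks like with this patch applied. This is
a sketch reconstructed from the hunks below, not a verbatim copy of the file;
the parts of the function untouched by the patch are elided, and the
zero-initialisation of 'cd' is assumed rather than shown in the diff.

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                                struct list_head *objects)
{
        struct drm_i915_gem_object *obj;
        struct change_domains cd;       /* assumed zero-initialised before use */
        int ret;

        /* Accumulate the domain transitions for every object in the batch. */
        list_for_each_entry(obj, objects, exec_list)
                i915_gem_object_set_to_gpu_domain(obj, ring, &cd);

        /* CPU and GTT writes are still flushed directly. */
        if (cd.flush_domains & I915_GEM_DOMAIN_CPU)
                intel_gtt_chipset_flush();

        if (cd.flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();

        if (cd.flips) {
                ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
                if (ret)
                        return ret;
        }

        /* ... unchanged parts of the function elided ... */

        /* GPU domains are flushed on the target ring only. */
        return i915_gem_flush_ring(ring,
                                   cd.invalidate_domains,
                                   cd.flush_domains);
}

The net effect is that struct change_domains no longer needs a flush_rings
mask and i915_gem_execbuffer_flush() can be dropped entirely.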

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3c54911..d04bd3d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -36,7 +36,6 @@
 struct change_domains {
        uint32_t invalidate_domains;
        uint32_t flush_domains;
-       uint32_t flush_rings;
        uint32_t flips;
 };
 
@@ -156,7 +155,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
                                  struct intel_ring_buffer *ring,
                                  struct change_domains *cd)
 {
-       uint32_t invalidate_domains = 0, flush_domains = 0;
+       uint32_t flush = 0;
 
        /*
         * If the object isn't moving to a new write domain,
@@ -172,22 +171,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
         * write domain
         */
        if (obj->base.write_domain &&
-           (obj->base.write_domain != obj->base.pending_read_domains ||
-            obj->ring != ring)) {
-               flush_domains |= obj->base.write_domain;
-               invalidate_domains |=
-                       obj->base.pending_read_domains & ~obj->base.write_domain;
-       }
+           obj->base.write_domain != obj->base.pending_read_domains)
+               flush = obj->base.write_domain;
        /*
         * Invalidate any read caches which may have
         * stale data. That is, any new read domains.
         */
-       invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
-       if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
+       if (flush & I915_GEM_DOMAIN_CPU)
                i915_gem_clflush_object(obj);
 
        /* blow away mappings if mapped through GTT */
-       if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
+       if (flush & I915_GEM_DOMAIN_GTT)
                i915_gem_release_mmap(obj);
 
        if (obj->base.pending_write_domain)
@@ -199,15 +193,12 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
         * write_domains).  So if we have a current write domain that we
         * aren't changing, set pending_write_domain to that.
         */
-       if (flush_domains == 0 && obj->base.pending_write_domain == 0)
+       if (flush == 0 && obj->base.pending_write_domain == 0)
                obj->base.pending_write_domain = obj->base.write_domain;
 
-       cd->invalidate_domains |= invalidate_domains;
-       cd->flush_domains |= flush_domains;
-       if (flush_domains & I915_GEM_GPU_DOMAINS)
-               cd->flush_rings |= obj->ring->id;
-       if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-               cd->flush_rings |= ring->id;
+       cd->flush_domains |= flush;
+       cd->invalidate_domains |=
+               obj->base.pending_read_domains & ~obj->base.read_domains;
 }
 
 struct eb_objects {
@@ -710,35 +701,6 @@ err:
 }
 
 static int
-i915_gem_execbuffer_flush(struct drm_device *dev,
-                         uint32_t invalidate_domains,
-                         uint32_t flush_domains,
-                         uint32_t flush_rings)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       int i, ret;
-
-       if (flush_domains & I915_GEM_DOMAIN_CPU)
-               intel_gtt_chipset_flush();
-
-       if (flush_domains & I915_GEM_DOMAIN_GTT)
-               wmb();
-
-       if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
-               for (i = 0; i < I915_NUM_RINGS; i++)
-                       if (flush_rings & (1 << i)) {
-                               ret = i915_gem_flush_ring(&dev_priv->ring[i],
-                                                         invalidate_domains,
-                                                         flush_domains);
-                               if (ret)
-                                       return ret;
-                       }
-       }
-
-       return 0;
-}
-
-static int
 i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
 {
        u32 plane, flip_mask;
@@ -783,14 +745,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
        list_for_each_entry(obj, objects, exec_list)
                i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
 
-       if (cd.invalidate_domains | cd.flush_domains) {
-               ret = i915_gem_execbuffer_flush(ring->dev,
-                                               cd.invalidate_domains,
-                                               cd.flush_domains,
-                                               cd.flush_rings);
-               if (ret)
-                       return ret;
-       }
+       if (cd.flush_domains & I915_GEM_DOMAIN_CPU)
+               intel_gtt_chipset_flush();
+
+       if (cd.flush_domains & I915_GEM_DOMAIN_GTT)
+               wmb();
 
        if (cd.flips) {
                ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
@@ -804,7 +763,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                        return ret;
        }
 
-       return 0;
+       return i915_gem_flush_ring(ring,
+                                  cd.invalidate_domains,
+                                  cd.flush_domains);
 }
 
 static bool
-- 
1.7.4.1
