As we guarantee to emit a flush before emitting the breadcrumb or
the next batchbuffer, there is no further need for the flushing list.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c   |    7 ----
 drivers/gpu/drm/i915/i915_drv.h       |   19 +++--------
 drivers/gpu/drm/i915/i915_gem.c       |   58 +++++----------------------------
 drivers/gpu/drm/i915/i915_gem_evict.c |   20 ------------
 4 files changed, 13 insertions(+), 91 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c 
b/drivers/gpu/drm/i915/i915_debugfs.c
index a8b7db6..1312b79 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -44,7 +44,6 @@
 
 enum {
        ACTIVE_LIST,
-       FLUSHING_LIST,
        INACTIVE_LIST,
        PINNED_LIST,
 };
@@ -178,10 +177,6 @@ static int i915_gem_object_list_info(struct seq_file *m, 
void *data)
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
-       case FLUSHING_LIST:
-               seq_printf(m, "Flushing:\n");
-               head = &dev_priv->mm.flushing_list;
-               break;
        default:
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
@@ -239,7 +234,6 @@ static int i915_gem_object_info(struct seq_file *m, void* 
data)
 
        size = count = mappable_size = mappable_count = 0;
        count_objects(&dev_priv->mm.active_list, mm_list);
-       count_objects(&dev_priv->mm.flushing_list, mm_list);
        seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
@@ -2007,7 +2001,6 @@ static struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_gtt", i915_gem_gtt_info, 0},
        {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
-       {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) 
FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) 
INACTIVE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b398b72..203562b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -696,17 +696,6 @@ typedef struct drm_i915_private {
                struct list_head active_list;
 
                /**
-                * List of objects which are not in the ringbuffer but which
-                * still have a write_domain which needs to be flushed before
-                * unbinding.
-                *
-                * last_rendering_seqno is 0 while an object is in this list.
-                *
-                * A reference is held on the buffer while on this list.
-                */
-               struct list_head flushing_list;
-
-               /**
                 * LRU list of objects which are not in the ringbuffer and
                 * are ready to unbind, but are still in the GTT.
                 *
@@ -873,7 +862,7 @@ struct drm_i915_gem_object {
        struct drm_mm_node *gtt_space;
        struct list_head gtt_list;
 
-       /** This object's place on the active/flushing/inactive lists */
+       /** This object's place on the active/inactive lists */
        struct list_head ring_list;
        struct list_head mm_list;
        /** This object's place on GPU write list */
@@ -882,9 +871,9 @@ struct drm_i915_gem_object {
        struct list_head exec_list;
 
        /**
-        * This is set if the object is on the active or flushing lists
-        * (has pending rendering), and is not set if it's on inactive (ready
-        * to be unbound).
+        * This is set if the object is on the active lists (has pending
+        * rendering and so a non-zero seqno), and is not set if it is on
+        * the inactive (ready to be unbound) list.
         */
        unsigned int active:1;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 49b8864..ee20fdb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1458,27 +1458,6 @@ i915_gem_object_move_to_active(struct 
drm_i915_gem_object *obj,
 }
 
 static void
-i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
-{
-       list_del_init(&obj->ring_list);
-       obj->last_read_seqno = 0;
-       obj->last_write_seqno = 0;
-       obj->last_fenced_seqno = 0;
-}
-
-static void
-i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
-{
-       struct drm_device *dev = obj->base.dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       BUG_ON(!obj->active);
-       list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
-
-       i915_gem_object_move_off_active(obj);
-}
-
-static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
@@ -1488,9 +1467,15 @@ i915_gem_object_move_to_inactive(struct 
drm_i915_gem_object *obj)
 
        BUG_ON(!list_empty(&obj->gpu_write_list));
        BUG_ON(!obj->active);
+
+       list_del_init(&obj->ring_list);
        obj->ring = NULL;
 
-       i915_gem_object_move_off_active(obj);
+       obj->last_read_seqno = 0;
+       obj->last_write_seqno = 0;
+       obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+
+       obj->last_fenced_seqno = 0;
        obj->fenced_gpu_access = false;
 
        obj->active = 0;
@@ -1693,7 +1678,6 @@ static void i915_gem_reset_ring_lists(struct 
drm_i915_private *dev_priv,
                                       struct drm_i915_gem_object,
                                       ring_list);
 
-               obj->base.write_domain = 0;
                list_del_init(&obj->gpu_write_list);
                i915_gem_object_move_to_inactive(obj);
        }
@@ -1730,20 +1714,6 @@ void i915_gem_reset(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i)
                i915_gem_reset_ring_lists(dev_priv, ring);
 
-       /* Remove anything from the flushing lists. The GPU cache is likely
-        * to be lost on reset along with the data, so simply move the
-        * lost bo to the inactive list.
-        */
-       while (!list_empty(&dev_priv->mm.flushing_list)) {
-               obj = list_first_entry(&dev_priv->mm.flushing_list,
-                                     struct drm_i915_gem_object,
-                                     mm_list);
-
-               obj->base.write_domain = 0;
-               list_del_init(&obj->gpu_write_list);
-               i915_gem_object_move_to_inactive(obj);
-       }
-
        /* Move everything out of the GPU domains to ensure we do any
         * necessary invalidation upon reuse.
         */
@@ -1814,10 +1784,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer 
*ring)
                if (!i915_seqno_passed(seqno, obj->last_read_seqno))
                        break;
 
-               if (obj->base.write_domain != 0)
-                       i915_gem_object_move_to_flushing(obj);
-               else
-                       i915_gem_object_move_to_inactive(obj);
+               i915_gem_object_move_to_inactive(obj);
        }
 
        if (unlikely(ring->trace_irq_seqno &&
@@ -3891,7 +3858,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        }
 
        BUG_ON(!list_empty(&dev_priv->mm.active_list));
-       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
        mutex_unlock(&dev->struct_mutex);
 
@@ -3949,7 +3915,6 @@ i915_gem_load(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        INIT_LIST_HEAD(&dev_priv->mm.active_list);
-       INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
@@ -4200,12 +4165,7 @@ static int
 i915_gpu_is_active(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int lists_empty;
-
-       lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                     list_empty(&dev_priv->mm.active_list);
-
-       return !lists_empty;
+       return !list_empty(&dev_priv->mm.active_list);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c 
b/drivers/gpu/drm/i915/i915_gem_evict.c
index ae7c24e..f61af59 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -92,23 +92,6 @@ i915_gem_evict_something(struct drm_device *dev, int 
min_size,
 
        /* Now merge in the soon-to-be-expired objects... */
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-               /* Does the object require an outstanding flush? */
-               if (obj->base.write_domain)
-                       continue;
-
-               if (mark_free(obj, &unwind_list))
-                       goto found;
-       }
-
-       /* Finally add anything with a pending flush (in order of retirement) */
-       list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-               if (mark_free(obj, &unwind_list))
-                       goto found;
-       }
-       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-               if (!obj->base.write_domain)
-                       continue;
-
                if (mark_free(obj, &unwind_list))
                        goto found;
        }
@@ -171,7 +154,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool 
purgeable_only)
        int ret;
 
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-                      list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->mm.active_list));
        if (lists_empty)
                return -ENOSPC;
@@ -188,8 +170,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool 
purgeable_only)
 
        i915_gem_retire_requests(dev);
 
-       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
        /* Having flushed everything, unbind() should never raise an error */
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.inactive_list, mm_list) {
-- 
1.7.10.4

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to