Require the lru_lock and a reservation to be held on entry, and kill
off the retry loop: no new sync objects should be attached to a dead
buffer at this point any more.
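
A rough caller-side sketch of the new contract (this mirrors the
ttm_bo_delayed_delete hunk below; error handling trimmed):

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
	if (ret)
		spin_unlock(&glob->lru_lock);
	else
		/* drops both the reservation and lru_lock */
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, !remove_all);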

v2:
 - moved up in the patch series and fixed conflicts.
v3:
 - rebase on top of the fence lock changes, and rename to
   ttm_bo_cleanup_refs_and_unlock to make clear that it unlocks the lru_lock.
v4:
 - add WARN_ON(!atomic_read(&bo->kref.refcount)) in reserve to ensure
   that nobody accidentally reserves a dead buffer.
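
The v4 WARN_ON is a debugging aid: a buffer on the lru or ddestroy
lists may be kept alive only by its list_kref while its kref has
already dropped to zero. A hypothetical sketch of the bug it flags:

	/* Illustration only: 'bo' was taken from a list without
	 * acquiring a kref first. If the last kref is gone the
	 * buffer is dead, only the delayed-destroy path may still
	 * reserve it, and ttm_bo_reserve now warns about this caller.
	 */
	ttm_bo_reserve(bo, false, false, false, 0);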

Signed-off-by: Maarten Lankhorst <maarten.lankho...@canonical.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c           | 123 ++++++++++++++++-----------------
 drivers/gpu/drm/ttm/ttm_execbuf_util.c |   1 +
 2 files changed, 62 insertions(+), 62 deletions(-)
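
For review, the restructured ttm_bo_cleanup_refs_and_unlock boils
down to roughly the following (sketch only, the real code is in the
first hunk below):

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, true); /* non-blocking check */
	if (ret && no_wait_gpu) {
		/* still busy: unreserve, drop lru_lock, return -EBUSY */
	} else if (ret) {
		/* still busy: take a fence ref, unreserve, drop both
		 * locks and wait blocking, then retake lru_lock. A
		 * dead buffer cannot get a new sync_obj attached, so
		 * the non-blocking re-check must now succeed (WARN_ON
		 * otherwise).
		 */
	} else {
		/* already idle: just unreserve */
	}
	/* finally unhook from the ddestroy and lru lists, drop
	 * lru_lock and release the list references */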

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 70285ff..e6df086 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -296,6 +296,8 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
        int put_count = 0;
        int ret;
 
+       WARN_ON(!atomic_read(&bo->kref.refcount));
+
        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
@@ -522,82 +524,78 @@ queue:
 }
 
 /**
- * function ttm_bo_cleanup_refs
+ * function ttm_bo_cleanup_refs_and_unlock
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, do nothing.
  *
+ * Must be called with lru_lock and reservation held, this function
+ * will drop both before returning.
+ *
  * @interruptible         Any sleeps should occur interruptibly.
- * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
  * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
  */
 
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
-                              bool interruptible,
-                              bool no_wait_reserve,
-                              bool no_wait_gpu)
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+                                         bool interruptible,
+                                         bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_bo_global *glob = bo->glob;
        int put_count;
        int ret = 0;
-       void *sync_obj;
-
-retry:
-       spin_lock(&glob->lru_lock);
-
-       ret = ttm_bo_reserve_locked(bo, interruptible,
-                                   no_wait_reserve, false, 0);
 
-       if (unlikely(ret)) {
-               spin_unlock(&glob->lru_lock);
-               return ret;
-       }
+       spin_lock(&bdev->fence_lock);
+       ret = ttm_bo_wait(bo, false, false, true);
 
-       if (unlikely(list_empty(&bo->ddestroy))) {
+       if (ret && no_wait_gpu) {
+               spin_unlock(&bdev->fence_lock);
                atomic_set(&bo->reserved, 0);
                wake_up_all(&bo->event_queue);
                spin_unlock(&glob->lru_lock);
-               return 0;
-       }
-
-       spin_lock(&bdev->fence_lock);
-       ret = ttm_bo_wait(bo, false, false, true);
-       if (ret) {
-               if (no_wait_gpu) {
-                       spin_unlock(&bdev->fence_lock);
-                       atomic_set(&bo->reserved, 0);
-                       wake_up_all(&bo->event_queue);
-                       spin_unlock(&glob->lru_lock);
-                       return ret;
-               }
+               return ret;
+       } else if (ret) {
+               void *sync_obj;
 
                /**
-                * Take a reference to the fence and unreserve, if the wait
-                * was succesful and no new sync_obj was attached,
-                * ttm_bo_wait in retry will return ret = 0, and end the loop.
+                * Take a reference to the fence and unreserve,
+                * at this point the buffer should be dead, so
+                * no new sync objects can be attached.
                 */
-
                sync_obj = driver->sync_obj_ref(&bo->sync_obj);
                spin_unlock(&bdev->fence_lock);
                atomic_set(&bo->reserved, 0);
                wake_up_all(&bo->event_queue);
                spin_unlock(&glob->lru_lock);
 
-               ret = driver->sync_obj_wait(bo->sync_obj, false, interruptible);
+               ret = driver->sync_obj_wait(sync_obj, false, interruptible);
                driver->sync_obj_unref(&sync_obj);
                if (ret)
                        return ret;
-               goto retry;
+               spin_lock(&glob->lru_lock);
+
+               /* remove sync_obj with ttm_bo_wait */
+               spin_lock(&bdev->fence_lock);
+               ret = ttm_bo_wait(bo, false, false, true);
+               spin_unlock(&bdev->fence_lock);
+
+               WARN_ON(ret);
+
+       } else {
+               spin_unlock(&bdev->fence_lock);
+               atomic_set(&bo->reserved, 0);
+               wake_up_all(&bo->event_queue);
+       }
+
+       if (unlikely(list_empty(&bo->ddestroy))) {
+               spin_unlock(&glob->lru_lock);
+               return 0;
        }
-       spin_unlock(&bdev->fence_lock);
 
        put_count = ttm_bo_del_from_lru(bo);
        list_del_init(&bo->ddestroy);
        ++put_count;
 
-       atomic_set(&bo->reserved, 0);
-       wake_up_all(&bo->event_queue);
        spin_unlock(&glob->lru_lock);
 
        ttm_bo_list_ref_sub(bo, put_count, true);
@@ -606,8 +604,8 @@ retry:
 }
 
 /**
- * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
- * encountered buffers.
+ * Traverse the delayed list, and call ttm_bo_cleanup_refs_and_unlock
+ * on all encountered buffers.
  */
 
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
@@ -633,9 +631,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                        kref_get(&nentry->list_kref);
                }
 
-               spin_unlock(&glob->lru_lock);
-               ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
-                                         !remove_all);
+               ret = ttm_bo_reserve_locked(entry, false, !remove_all,
+                                           false, 0);
+               if (ret)
+                       spin_unlock(&glob->lru_lock);
+               else
+                       ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+                                                            !remove_all);
+
                kref_put(&entry->list_kref, ttm_bo_release_list);
                entry = nentry;
 
@@ -788,15 +791,6 @@ retry:
        bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
        kref_get(&bo->list_kref);
 
-       if (!list_empty(&bo->ddestroy)) {
-               spin_unlock(&glob->lru_lock);
-               ret = ttm_bo_cleanup_refs(bo, interruptible,
-                                         no_wait_reserve, no_wait_gpu);
-               kref_put(&bo->list_kref, ttm_bo_release_list);
-
-               return ret;
-       }
-
        ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
        if (unlikely(ret == -EBUSY)) {
@@ -815,6 +809,13 @@ retry:
                goto retry;
        }
 
+       if (!list_empty(&bo->ddestroy)) {
+               ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+                                                    no_wait_gpu);
+               kref_put(&bo->list_kref, ttm_bo_release_list);
+               return ret;
+       }
+
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);
 
@@ -1778,14 +1779,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                                      struct ttm_buffer_object, swap);
                kref_get(&bo->list_kref);
 
-               if (!list_empty(&bo->ddestroy)) {
-                       spin_unlock(&glob->lru_lock);
-                       (void) ttm_bo_cleanup_refs(bo, false, false, false);
-                       kref_put(&bo->list_kref, ttm_bo_release_list);
-                       spin_lock(&glob->lru_lock);
-                       continue;
-               }
-
                /**
                 * Reserve buffer. Since we unlock while sleeping, we need
                 * to re-check that nobody removed us from the swap-list while
@@ -1801,6 +1794,12 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                }
        }
 
+       if (!list_empty(&bo->ddestroy)) {
+               ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+               kref_put(&bo->list_kref, ttm_bo_release_list);
+               return ret;
+       }
+
        BUG_ON(ret != 0);
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index cd9e452..5490492 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -152,6 +152,7 @@ retry:
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
 
+               WARN_ON(!atomic_read(&bo->kref.refcount));
 retry_this_bo:
                ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
                switch (ret) {
-- 
1.8.0
