Instead of dropping everything, waiting for the bo to be unreserved
and trying again, a better strategy is to do a blocking wait.

This can be mapped a lot better to a mutex_lock-like call.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst at canonical.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c    | 47 +++++++++++++++++++++++++++++++++++++++++
 include/drm/ttm/ttm_bo_driver.h | 30 ++++++++++++++++++++++++++
 2 files changed, 77 insertions(+)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 61b5cd0..174b325 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -310,6 +310,53 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
        return ret;
 }

+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+                                 bool interruptible, uint32_t sequence)
+{
+       bool wake_up = false;
+       int ret;
+
+       while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+               WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+
+               ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+               wake_up = true;
+
+       /*
+        * Wake up waiters that may need to recheck for deadlock,
+        * if we decreased the sequence number: kernel-doc-style
+        */
+       bo->val_seq = sequence;
+       bo->seq_valid = true;
+       if (wake_up)
+               wake_up_all(&bo->event_queue);
+
+       return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+                           bool interruptible, uint32_t sequence)
+{
+       struct ttm_bo_global *glob = bo->glob;
+       int put_count, ret;
+
+       ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+       if (likely(!ret)) {
+               spin_lock(&glob->lru_lock);
+               put_count = ttm_bo_del_from_lru(bo);
+               spin_unlock(&glob->lru_lock);
+               ttm_bo_list_ref_sub(bo, put_count, true);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
+
 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
 {
        ttm_bo_add_to_lru(bo);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6fff432..5af71af 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -821,6 +821,36 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence);

+/**
+ * ttm_bo_reserve_slowpath_nolru:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->sequence to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ *
+ * Will not remove reserved buffers from the lru lists.
+ * Otherwise identical to ttm_bo_reserve_slowpath.
+ */
+extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+                                        bool interruptible,
+                                        uint32_t sequence);
+
+
+/**
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->sequence to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ */
+extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+                                  bool interruptible, uint32_t sequence);

 /**
  * ttm_bo_reserve_nolru:
-- 
1.8.0

Reply via email to