[PATCH 3/7] drm/ttm: cleanup ttm_eu_reserve_buffers handling

2012-12-10 Thread Maarten Lankhorst
With the lru lock no longer required for protecting reservations, we
can simply retry with a blocking ttm_bo_reserve_nolru on -EBUSY and
handle all errors in a single path.

Signed-off-by: Maarten Lankhorst 
Reviewed-by: Jerome Glisse 
---
 drivers/gpu/drm/ttm/ttm_execbuf_util.c | 53 ++
 1 file changed, 21 insertions(+), 32 deletions(-)

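Roughly, the reworked per-buffer flow looks like the standalone sketch
below. It uses hypothetical stubs (try_reserve(), lock_lru(),
backoff_all()) rather than the real TTM helpers, and it leaves out the
-EAGAIN/val_seq retry of the whole list as well as the
ttm_eu_list_ref_sub() reference drop that the actual function does; it
only illustrates the control flow: try a non-blocking reserve, fall
back to a blocking reserve with the lru lock dropped on -EBUSY, and
funnel every failure into one err: cleanup label.

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical stand-ins, not the TTM API. */
	static int try_reserve(int idx, int no_wait)
	{
		/* Pretend buffer 1 is contended so the non-blocking attempt fails. */
		return (no_wait && idx == 1) ? -EBUSY : 0;
	}

	static void lock_lru(void)    { }
	static void unlock_lru(void)  { }
	static void backoff_all(void) { puts("backoff: unreserve everything"); }

	static int reserve_all(int nbufs)
	{
		int i, ret;

		lock_lru();
		for (i = 0; i < nbufs; i++) {
			ret = try_reserve(i, 1);	/* no_wait == true */
			if (ret == -EBUSY) {
				unlock_lru();		/* blocking path may sleep */
				ret = try_reserve(i, 0);/* no_wait == false */
				lock_lru();
			}
			if (ret)
				goto err;		/* single error path */
		}
		unlock_lru();
		return 0;

	err:
		backoff_all();
		unlock_lru();
		return ret;
	}

	int main(void)
	{
		printf("reserve_all() -> %d\n", reserve_all(3));
		return 0;
	}
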
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index bd37b5c..c7d3236 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -82,22 +82,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
}
 }
 
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
-struct ttm_buffer_object *bo)
-{
-   struct ttm_bo_global *glob = bo->glob;
-   int ret;
-
-   ttm_eu_del_from_lru_locked(list);
-   spin_unlock(&glob->lru_lock);
-   ret = ttm_bo_wait_unreserved(bo, true);
-   spin_lock(&glob->lru_lock);
-   if (unlikely(ret != 0))
-   ttm_eu_backoff_reservation_locked(list);
-   return ret;
-}
-
-
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
struct ttm_validate_buffer *entry;
@@ -152,19 +136,23 @@ retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
 
-retry_this_bo:
ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
switch (ret) {
case 0:
break;
case -EBUSY:
-   ret = ttm_eu_wait_unreserved_locked(list, bo);
-   if (unlikely(ret != 0)) {
-   spin_unlock(&glob->lru_lock);
-   ttm_eu_list_ref_sub(list);
-   return ret;
-   }
-   goto retry_this_bo;
+   ttm_eu_del_from_lru_locked(list);
+   spin_unlock(&glob->lru_lock);
+   ret = ttm_bo_reserve_nolru(bo, true, false,
+  true, val_seq);
+   spin_lock(&glob->lru_lock);
+   if (!ret)
+   break;
+
+   if (unlikely(ret != -EAGAIN))
+   goto err;
+
+   /* fallthrough */
case -EAGAIN:
ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
@@ -174,18 +162,13 @@ retry_this_bo:
return ret;
goto retry;
default:
-   ttm_eu_backoff_reservation_locked(list);
-   spin_unlock(&glob->lru_lock);
-   ttm_eu_list_ref_sub(list);
-   return ret;
+   goto err;
}
 
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-   ttm_eu_backoff_reservation_locked(list);
-   spin_unlock(&glob->lru_lock);
-   ttm_eu_list_ref_sub(list);
-   return -EBUSY;
+   ret = -EBUSY;
+   goto err;
}
}
 
@@ -194,6 +177,12 @@ retry_this_bo:
ttm_eu_list_ref_sub(list);
 
return 0;
+
+err:
+   ttm_eu_backoff_reservation_locked(list);
+   spin_unlock(&glob->lru_lock);
+   ttm_eu_list_ref_sub(list);
+   return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
-- 
1.8.0

