[PATCH 6/7] drm/ttm: make unlocking in ttm_bo_cleanup_refs optional

2017-11-09 Thread Christian König
Needed for the next patch.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/ttm/ttm_bo.c | 52 
 1 file changed, 28 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 6f5d18366e6e..50a678b504f3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -486,20 +486,21 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 }
 
 /**
- * function ttm_bo_cleanup_refs_and_unlock
+ * function ttm_bo_cleanup_refs
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, do nothing.
  *
  * Must be called with lru_lock and reservation held, this function
- * will drop both before returning.
+ * will drop the lru lock and optionally the reservation lock before returning.
  *
  * @interruptible Any sleeps should occur interruptibly.
  * @no_wait_gpu   Never wait for gpu. Return -EBUSY instead.
+ * @unlock_resv   Unlock the reservation lock as well.
  */
 
-static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait_gpu)
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
+  bool interruptible, bool no_wait_gpu,
+  bool unlock_resv)
 {
struct ttm_bo_global *glob = bo->glob;
struct reservation_object *resv;
@@ -518,7 +519,8 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
if (ret && !no_wait_gpu) {
long lret;
 
-   reservation_object_unlock(bo->resv);
+   if (unlock_resv)
+   reservation_object_unlock(bo->resv);
spin_unlock(&glob->lru_lock);
 
lret = reservation_object_wait_timeout_rcu(resv, true,
@@ -531,19 +533,22 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return -EBUSY;
 
spin_lock(&glob->lru_lock);
-   ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
-
-   /*
-* We raced, and lost, someone else holds the reservation now,
-* and is probably busy in ttm_bo_cleanup_memtype_use.
-*
-* Even if it's not the case, because we finished waiting any
-* delayed destruction would succeed, so just return success
-* here.
-*/
-   if (ret) {
-   spin_unlock(&glob->lru_lock);
-   return 0;
+   if (unlock_resv) {
+   ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
+   /*
+* We raced, and lost, someone else holds the reservation now,
+* and is probably busy in ttm_bo_cleanup_memtype_use.
+*
+* Even if it's not the case, because we finished waiting any
+* delayed destruction would succeed, so just return success
+* here.
+*/
+   if (ret) {
+   spin_unlock(&glob->lru_lock);
+   return 0;
+   }
+   } else {
+   ret = 0;
}
}
 
@@ -600,8 +605,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
}
 
if (!ret)
-   ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
-!remove_all);
+   ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
+ true);
else
spin_unlock(&glob->lru_lock);
 
@@ -770,8 +775,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
kref_get(&bo->list_kref);
 
if (!list_empty(&bo->ddestroy)) {
-   ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
-no_wait_gpu);
+   ret = ttm_bo_cleanup_refs(bo, interruptible, no_wait_gpu, true);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -1735,7 +1739,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
kref_get(&bo->list_kref);
 
if (!list_empty(&bo->ddestroy)) {
-   ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+   ret = ttm_bo_cleanup_refs(bo, false, false, true);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
-- 
2.11.0
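
To make the new locking contract concrete, here is a minimal, hypothetical caller sketch (not part of the patch; the function name and call site are invented for illustration, and it assumes the function honors unlock_resv all the way through, see the follow-up discussion below). A caller that must keep holding the reservation, for example because the BO shares the reservation object of a per-VM root BO, passes unlock_resv=false; the function then still drops the lru_lock but leaves bo->resv locked for the caller to release later:

/* Hypothetical example, not part of this patch. */
static int example_cleanup_shared_resv_bo(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;

	/*
	 * ttm_bo_cleanup_refs() must be entered with the lru_lock held
	 * and bo->resv already reserved by us.
	 */
	spin_lock(&glob->lru_lock);
	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		/* unlock_resv=false: drops the lru_lock, keeps bo->resv locked. */
		ret = ttm_bo_cleanup_refs(bo, false, false, false);
	} else {
		spin_unlock(&glob->lru_lock);
	}

	kref_put(&bo->list_kref, ttm_bo_release_list);

	/* The reservation is still ours and must be released by us. */
	reservation_object_unlock(bo->resv);
	return ret;
}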


RE: [PATCH 6/7] drm/ttm: make unlocking in ttm_bo_cleanup_refs optional

2017-11-10 Thread He, Roger
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
   bool interruptible, bool no_wait_gpu,
   bool unlock_resv)
{
..
ttm_bo_del_from_lru(bo);
list_del_init(&bo->ddestroy);
kref_put(&bo->list_kref, ttm_bo_ref_bug);

spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);

if (unlock_resv)
  //[Roger]: not sure we should add the condition here as well. If not, for
  // per-vm-bo, eviction would unlock the VM root BO's resv obj, which is not
  // what we want, I think.
reservation_object_unlock(bo->resv);

return 0;
}


Thanks
Roger(Hongbo.He)


Re: [PATCH 6/7] drm/ttm: make unlocking in ttm_bo_cleanup_refs optional

2017-11-12 Thread Christian König
Good catch, and yeah, that is actually the real problem why my original
patch didn't work as expected.


Going to fix this in v2,
Christian.

On 10.11.2017 at 10:55, He, Roger wrote:

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
   bool interruptible, bool no_wait_gpu,
   bool unlock_resv)
{
..
ttm_bo_del_from_lru(bo);
list_del_init(&bo->ddestroy);
kref_put(&bo->list_kref, ttm_bo_ref_bug);

spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);

if (unlock_resv)
  //[Roger]: not sure we should add the condition here as well. If not, for
  // per-vm-bo, eviction would unlock the VM root BO's resv obj, which is not
  // what we want, I think.
reservation_object_unlock(bo->resv);

return 0;
}


Thanks
Roger(Hongbo.He)
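
For reference, a minimal sketch of how the tail of ttm_bo_cleanup_refs might look once the final unlock is also made conditional, as Roger suggests above and as Christian says will be addressed in v2 (this is an assumption about v2, not the code that was posted):

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible, bool no_wait_gpu,
			       bool unlock_resv)
{
	..
	ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	kref_put(&bo->list_kref, ttm_bo_ref_bug);

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	/* Only drop the reservation when the caller asked for it. */
	if (unlock_resv)
		reservation_object_unlock(bo->resv);

	return 0;
}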
