RE: [PATCH 2/6] drm/ttm: add BO priorities for the LRUs

2017-01-15 Thread He, Hongbo
Reviewed-by: Roger.He <hongbo...@amd.com> 

-Original Message-
From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of 
Christian König
Sent: Friday, January 13, 2017 5:51 PM
To: He, Hongbo <hongbo...@amd.com>
Cc: dri-de...@lists.freedesktop.org; amd-gfx@lists.freedesktop.org
Subject: [PATCH 2/6] drm/ttm: add BO priorities for the LRUs

[PATCH 2/6] drm/ttm: add BO priorities for the LRUs

2017-01-13 Thread Christian König
From: Christian König 

This way the driver can specify a priority for a BO, with the effect that the
BO is only evicted once all other BOs with a lower priority have been evicted.
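
As an illustration of the resulting eviction order (not part of the patch
itself), here is a minimal standalone sketch: it models the per-priority walk
with plain C arrays instead of the kernel's list_heads. struct demo_bo,
evict_first() and NUM_PRIORITIES are invented for this example, with
NUM_PRIORITIES standing in for TTM_MAX_BO_PRIORITY.

#include <stdbool.h>
#include <stdio.h>

#define NUM_PRIORITIES 4	/* stand-in for TTM_MAX_BO_PRIORITY */

struct demo_bo {
	const char *name;
	unsigned int priority;	/* 0 is evicted first, higher survives longer */
	bool evicted;
};

/* Pick the first still-resident BO on the lowest non-empty priority level,
 * mirroring the per-priority loop this patch adds to ttm_mem_evict_first(). */
static struct demo_bo *evict_first(struct demo_bo *bos, unsigned int count)
{
	unsigned int p, i;

	for (p = 0; p < NUM_PRIORITIES; ++p)
		for (i = 0; i < count; ++i)
			if (!bos[i].evicted && bos[i].priority == p) {
				bos[i].evicted = true;
				return &bos[i];
			}
	return NULL;
}

int main(void)
{
	struct demo_bo bos[] = {
		{ "scanout", 3, false },
		{ "texture", 0, false },
		{ "shader",  1, false },
		{ "staging", 0, false },
	};
	struct demo_bo *victim;

	/* Evicts texture and staging (priority 0) before shader (1),
	 * with scanout (3) going last. */
	while ((victim = evict_first(bos, sizeof(bos) / sizeof(bos[0]))))
		printf("evicting %s (priority %u)\n",
		       victim->name, victim->priority);

	return 0;
}

Running it evicts the two priority 0 BOs first and the priority 3 "scanout"
last, which is exactly the ordering the per-priority LRU lists guarantee.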

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |  4 +-
 drivers/gpu/drm/ttm/ttm_bo.c            | 67 ++---
 include/drm/ttm/ttm_bo_api.h            |  2 +
 include/drm/ttm/ttm_bo_driver.h         |  6 ++-
 4 files changed, 52 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 0717dd1..0a61930 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1166,8 +1166,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
 
for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
-   lru->lru[j] = &adev->mman.bdev.man[j].lru;
-   lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
+   lru->lru[j] = &adev->mman.bdev.man[j].lru[0];
+   lru->swap_lru = &adev->mman.bdev.glob->swap_lru[0];
}
 
for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 6683399..f078b43 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -242,13 +242,13 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
 struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
 {
-   return bo->bdev->man[bo->mem.mem_type].lru.prev;
+   return bo->bdev->man[bo->mem.mem_type].lru[bo->priority].prev;
 }
 EXPORT_SYMBOL(ttm_bo_default_lru_tail);
 
 struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
 {
-   return bo->glob->swap_lru.prev;
+   return bo->glob->swap_lru[bo->priority].prev;
 }
 EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
 
@@ -741,20 +741,27 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_buffer_object *bo;
int ret = -EBUSY, put_count;
+   unsigned i;
 
spin_lock(&glob->lru_lock);
-   list_for_each_entry(bo, &man->lru, lru) {
-   ret = __ttm_bo_reserve(bo, false, true, NULL);
-   if (ret)
-   continue;
+   for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+   list_for_each_entry(bo, &man->lru[i], lru) {
+   ret = __ttm_bo_reserve(bo, false, true, NULL);
+   if (ret)
+   continue;
 
-   if (place && !bdev->driver->eviction_valuable(bo, place)) {
-   __ttm_bo_unreserve(bo);
-   ret = -EBUSY;
-   continue;
+   if (place && !bdev->driver->eviction_valuable(bo,
+ place)) {
+   __ttm_bo_unreserve(bo);
+   ret = -EBUSY;
+   continue;
+   }
+
+   break;
}
 
-   break;
+   if (!ret)
+   break;
}
 
if (ret) {
@@ -1197,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
}
atomic_inc(&bo->glob->bo_count);
drm_vma_node_reset(&bo->vma_node);
+   bo->priority = 0;
 
/*
 * For ttm_bo_type_device buffers, allocate
@@ -1297,18 +1305,21 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob = bdev->glob;
struct fence *fence;
int ret;
+   unsigned i;
 
/*
 * Can't use standard list traversal since we're unlocking.
 */
 
spin_lock(&glob->lru_lock);
-   while (!list_empty(&man->lru)) {
-   spin_unlock(&glob->lru_lock);
-   ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
-   if (ret)
-   return ret;
-   spin_lock(&glob->lru_lock);
+   for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+   while (!list_empty(&man->lru[i])) {
+   spin_unlock(&glob->lru_lock);
+   ret = ttm_mem_evict_first(bdev, mem_type, NULL, false,
+         false);
+   if (ret)
+   return ret;
+   spin_lock(&glob->lru_lock);
+   }
}
spin_unlock(&glob->lru_lock);
 
@@ -1385,6 +1396,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 {
int ret = -EINVAL;
struct ttm_mem_type_manager *man;
+   unsigned i;
 
BUG_ON(type >= TTM_NUM_MEM_TYPES);
man = &bdev->man[type];
@@ -1410,7 +1422,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
man->use_type = true;
man->size = p_size;
 
-