On 29.04.24 18:47, Tvrtko Ursulin wrote:
From: Tvrtko Ursulin <tvrtko.ursu...@igalia.com>

All memory domains apart from AMDGPU_GEM_DOMAIN_GTT map 1:1 to TTM
placements, while AMDGPU_GEM_DOMAIN_GTT can map to either
AMDGPU_PL_PREEMPT or TTM_PL_TT, depending on AMDGPU_GEM_CREATE_PREEMPTIBLE.

Simplify a few places in the code which convert the TTM placement into
a domain, by instead checking against the current placement directly.

In the conversion AMDGPU_PL_PREEMPT does not have to be handled either,
because amdgpu_mem_type_to_domain() cannot return that value anyway.
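
For reference, the helper being bypassed has roughly the following
shape (a simplified sketch for illustration, not a verbatim copy of
amdgpu_mem_type_to_domain() from amdgpu_object.c); the point is that
AMDGPU_PL_PREEMPT falls through to the default case and is therefore
never converted back to AMDGPU_GEM_DOMAIN_GTT:

/* Illustrative sketch only; see amdgpu_object.c for the real helper. */
static uint32_t example_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	default:
		/* AMDGPU_PL_PREEMPT has no GEM domain equivalent here, so
		 * callers checking the returned domain never see it.
		 */
		return 0;
	}
}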

v2:
  * Remove AMDGPU_PL_PREEMPT handling.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursu...@igalia.com>
Reviewed-by: Christian König <christian.koe...@amd.com> # v1
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c |  3 +--
  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  | 27 +++++++++------------
  2 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 055ba2ea4c12..0b3b10d21952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -165,8 +165,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 		if (r)
 			return ERR_PTR(r);
 
-	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
-		     AMDGPU_GEM_DOMAIN_GTT)) {
+	} else if (bo->tbo.resource->mem_type != TTM_PL_TT) {
 		return ERR_PTR(-EBUSY);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8bc79924d171..eb5bd6962560 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -976,12 +976,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	ttm_bo_pin(&bo->tbo);
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
-       if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+       if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
                atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
                atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
                             &adev->visible_pin_size);
-       } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+       } else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
                atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
        }
@@ -1280,7 +1279,6 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
  {
        uint64_t size = amdgpu_bo_size(bo);
        struct drm_gem_object *obj;
-       unsigned int domain;
        bool shared;
 
 	/* Abort if the BO doesn't currently have a backing store */
@@ -1290,21 +1288,20 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
        obj = &bo->tbo.base;
        shared = drm_gem_object_is_shared_for_memory_stats(obj);
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
-       switch (domain) {
-       case AMDGPU_GEM_DOMAIN_VRAM:
+       switch (bo->tbo.resource->mem_type) {
+       case TTM_PL_VRAM:
                stats->vram += size;
                if (amdgpu_bo_in_cpu_visible_vram(bo))
                        stats->visible_vram += size;
                if (shared)
                        stats->vram_shared += size;
                break;
-       case AMDGPU_GEM_DOMAIN_GTT:
+       case TTM_PL_TT:
                stats->gtt += size;
                if (shared)
                        stats->gtt_shared += size;
                break;
-       case AMDGPU_GEM_DOMAIN_CPU:
+       case TTM_PL_SYSTEM:
        default:
                stats->cpu += size;
                if (shared)
@@ -1317,7 +1314,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
                if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        stats->requested_visible_vram += size;
 
-		if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
+               if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
                        stats->evicted_vram += size;
                        if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                                stats->evicted_visible_vram += size;
@@ -1592,19 +1589,17 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 	u64 size;
 
 	if (dma_resv_trylock(bo->tbo.base.resv)) {
-               unsigned int domain;
-               domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
-               switch (domain) {
-               case AMDGPU_GEM_DOMAIN_VRAM:
+               switch (bo->tbo.resource->mem_type) {
+               case TTM_PL_VRAM:
                        if (amdgpu_bo_in_cpu_visible_vram(bo))
                                placement = "VRAM VISIBLE";
                        else
                                placement = "VRAM";
                        break;
-               case AMDGPU_GEM_DOMAIN_GTT:
+               case TTM_PL_TT:
                        placement = "GTT";
                        break;
-               case AMDGPU_GEM_DOMAIN_CPU:
+               case TTM_PL_SYSTEM:

I would still prefer an AMDGPU_PL_PREEMPT case here to be able to distinguish those.

On the other hand, the OA, GWS and GDS placements are missing as well, so that switch should probably be fixed.
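
Something along these lines, for illustration only (the AMDGPU_PL_*
placements exist in amdgpu_ttm.h; the label strings below are just
made up, not taken from an existing patch):

		switch (bo->tbo.resource->mem_type) {
		case TTM_PL_VRAM:
			if (amdgpu_bo_in_cpu_visible_vram(bo))
				placement = "VRAM VISIBLE";
			else
				placement = "VRAM";
			break;
		case TTM_PL_TT:
			placement = "GTT";
			break;
		case AMDGPU_PL_PREEMPT:
			placement = "PREEMPTIBLE";	/* illustrative label */
			break;
		case AMDGPU_PL_GDS:
			placement = "GDS";
			break;
		case AMDGPU_PL_GWS:
			placement = "GWS";
			break;
		case AMDGPU_PL_OA:
			placement = "OA";
			break;
		case TTM_PL_SYSTEM:
		default:
			placement = "CPU";
			break;
		}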

Either way, the patch is Reviewed-by: Christian König <christian.koe...@amd.com> for now, since it doesn't change the handling at all.

Regards,
Christian.

                default:
                        placement = "CPU";
                        break;
