On 03.05.24 at 10:23, Tvrtko Ursulin wrote:

On 03/05/2024 07:26, Christian König wrote:
On 29.04.24 at 18:47, Tvrtko Ursulin wrote:
From: Tvrtko Ursulin <tvrtko.ursu...@igalia.com>

Help code readability by replacing a bunch of:

bo->tbo.base.resv == vm->root.bo->tbo.base.resv

With:

amdgpu_vm_is_bo_always_valid(vm, bo)

No functional changes.
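
For illustration, a representative before/after sketch of one such call site (the wrapper function below is made up for the example and does not exist in the driver; the real changes are in the diff that follows):

    /* Representative sketch only; this exact function is hypothetical. */
    static void example_mark_moved(struct amdgpu_vm *vm, struct amdgpu_bo *bo,
                                   struct amdgpu_bo_va *bo_va)
    {
        /* Before: open-coded comparison of the reservation objects. */
        if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
            amdgpu_vm_bo_moved(&bo_va->base);

        /* After: the helper also covers the NULL bo check. */
        if (amdgpu_vm_is_bo_always_valid(vm, bo))
            amdgpu_vm_bo_moved(&bo_va->base);
    }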

v2:
  * Rename helper and move to amdgpu_vm. (Christian)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursu...@igalia.com>
Cc: Christian König <christian.koe...@amd.com>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c |  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 40 +++++++++++++++----------
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  |  2 ++
  3 files changed, 28 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 67c234bcf89f..e698d65e9508 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -174,7 +174,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
          return -EPERM;
      if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
-        abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+        !amdgpu_vm_is_bo_always_valid(vm, abo))
          return -EPERM;
      r = amdgpu_bo_reserve(abo, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8af3f0fd3073..01ca4b35b369 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -333,7 +333,7 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
      base->next = bo->vm_bo;
      bo->vm_bo = base;
-    if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+    if (!amdgpu_vm_is_bo_always_valid(vm, bo))
          return;
      dma_resv_assert_held(vm->root.bo->tbo.base.resv);
@@ -1101,13 +1101,13 @@ static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
       * For now ignore BOs which are currently locked and potentially
       * changing their location.
       */
-    if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
+    if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
          !dma_resv_trylock(bo->tbo.base.resv))
          return;
      amdgpu_bo_get_memory(bo, stats);
-    if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
-        dma_resv_unlock(bo->tbo.base.resv);
+    if (!amdgpu_vm_is_bo_always_valid(vm, bo))
+        dma_resv_unlock(bo->tbo.base.resv);
  }
  void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
@@ -1203,8 +1203,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
          uncached = false;
      }
-    if (clear || (bo && bo->tbo.base.resv ==
-              vm->root.bo->tbo.base.resv))
+    if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
          last_update = &vm->last_update;
      else
          last_update = &bo_va->last_pt_update;
@@ -1246,7 +1245,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
       * the evicted list so that it gets validated again on the
       * next command submission.
       */
-    if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
+    if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
          uint32_t mem_type = bo->tbo.resource->mem_type;
          if (!(bo->preferred_domains &
@@ -1640,10 +1639,9 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
      if (mapping->flags & AMDGPU_PTE_PRT)
          amdgpu_vm_prt_get(adev);
-    if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
-        !bo_va->base.moved) {
+    if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
          amdgpu_vm_bo_moved(&bo_va->base);
-    }
+
      trace_amdgpu_vm_bo_map(bo_va, mapping);
  }
@@ -1922,7 +1920,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
          if (before->flags & AMDGPU_PTE_PRT)
              amdgpu_vm_prt_get(adev);
-        if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+        if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
              !before->bo_va->base.moved)
              amdgpu_vm_bo_moved(&before->bo_va->base);
      } else {
@@ -1937,7 +1935,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
          if (after->flags & AMDGPU_PTE_PRT)
              amdgpu_vm_prt_get(adev);
-        if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+        if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
              !after->bo_va->base.moved)
              amdgpu_vm_bo_moved(&after->bo_va->base);
      } else {
@@ -2017,7 +2015,7 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
      if (bo) {
          dma_resv_assert_held(bo->tbo.base.resv);
-        if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
+        if (amdgpu_vm_is_bo_always_valid(vm, bo))
              ttm_bo_set_bulk_move(&bo->tbo, NULL);
          for (base = &bo_va->base.bo->vm_bo; *base;
@@ -2111,7 +2109,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
      for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
          struct amdgpu_vm *vm = bo_base->vm;
-        if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
+        if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
              amdgpu_vm_bo_evicted(bo_base);
              continue;
          }
@@ -2122,7 +2120,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
          if (bo->tbo.type == ttm_bo_type_kernel)
              amdgpu_vm_bo_relocated(bo_base);
-        else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
+        else if (amdgpu_vm_is_bo_always_valid(vm, bo))
              amdgpu_vm_bo_moved(bo_base);
          else
              amdgpu_vm_bo_invalidated(bo_base);
@@ -2986,3 +2984,15 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
      xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
  }
+/**
+ * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
+ *
+ * @vm: VM to test against.
+ * @bo: BO to be tested.
+ *
+ * Returns true if the BO is VM always valid.

Maybe improve that a bit, e.g. something like this:

"Returns true if the BO shares the dma_resv object with the root PD and is always guaranteed to be valid inside the VM."

I am only unsure whether the dma_resv and root PD are too much of an implementation detail, or really something the API user must know?

Yeah that's exactly the reason why I wanted to add this note.

It is an implementation detail, but everybody working on the kernel code needs to keep that in the back of their minds.

The background is that sharing the dma_resv object has consequences, on synchronization for example, that you can't ignore.
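
(For concreteness, a minimal sketch, not part of the patch: because an "always valid" BO shares its dma_resv with the root PD, taking the VM lock is what serializes access to the BO, and any fence added to that reservation object is observed by everything sharing it. The function name below is made up; the dma_resv calls are the regular kernel API.)

    /* Illustrative only; assumes the usual amdgpu and dma-resv headers. */
    static int example_lock_vm_covers_bo(struct amdgpu_vm *vm,
                                         struct amdgpu_bo *bo)
    {
        struct dma_resv *resv = vm->root.bo->tbo.base.resv;
        int r;

        r = dma_resv_lock(resv, NULL);
        if (r)
            return r;

        /* A per-VM BO shares this reservation object, so it is now
         * locked as well, and fences added here apply to it too.
         */
        if (amdgpu_vm_is_bo_always_valid(vm, bo))
            dma_resv_assert_held(bo->tbo.base.resv);

        dma_resv_unlock(resv);
        return 0;
    }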

Considering that in the uapi we have this:

/* Flag that BO is always valid in this VM */
#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID    (1 << 6)

Which is the reason I thought to keep the comment high level.

It shouldn't matter for the UAPI, but we just need to keep it in mind inside the kernel.
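
(Purely to illustrate that UAPI view, a hedged userspace sketch, not taken from this thread: the flag is simply passed at GEM create time, and the resv sharing is an internal consequence the application never sees. The function name is made up and include paths depend on the libdrm installation.)

    #include <string.h>
    #include <xf86drm.h>
    #include <amdgpu_drm.h>

    /* Hypothetical example; error handling trimmed to the minimum. */
    static int example_create_always_valid_bo(int drm_fd, __u32 *handle)
    {
        union drm_amdgpu_gem_create args;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = 4096;
        args.in.alignment = 4096;
        args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
        args.in.domain_flags = AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;

        if (drmIoctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
            return -1;

        *handle = args.out.handle;
        return 0;
    }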

Regards,
Christian.


Give me a final verdict and I can change it accordingly.

Regards,

Tvrtko

With that done the patch is Reviewed-by: Christian König <christian.koe...@amd.com>

Thanks,
Christian.

+ */
+bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
+{
+    return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 54d7da396de0..ec688a47dec1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -561,6 +561,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
   int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo);
+
  /**
   * amdgpu_vm_tlb_seq - return tlb flush sequence number
   * @vm: the amdgpu_vm structure to query

