Remove the dma mapping only after the TLB flush; otherwise the GPU may
access the stale mapping and generate an IOMMU IO_PAGE_FAULT.

Move this inside p->mutex to prevent a race condition when multiple
threads map and unmap concurrently.

With kfd_mem_dmaunmap_attachment removed from unmap_bo_from_gpuvm,
kfd_mem_dmaunmap_attachment is now called explicitly if mapping to GPUs
fails, and before freeing the mem attachment in case unmapping from
GPUs failed.
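
In short, the unmap ioctl path after this patch becomes (condensed
from the diff below; declarations, unmap loop and error handling
elided):

	mutex_lock(&p->mutex);
	/* ... unmap from all GPUs, wait for page table updates ... */
	for (i = 0; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (flush_tlb)
			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
		/* dma unmap only after the TLB flush, still under p->mutex */
		vm = drm_priv_to_vm(peer_pdd->drm_priv);
		amdgpu_amdkfd_gpuvm_dmaunmap_mem(vm, mem);
	}
	mutex_unlock(&p->mutex);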

Signed-off-by: Philip Yang <philip.y...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h    |  1 +
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 22 +++++++++++++++---
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c      | 23 ++++++++++++-------
 3 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 559f14cc0a99..dff79a623f4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -304,6 +304,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
                                          struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
+void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct amdgpu_vm *vm, struct kgd_mem *mem);
 int amdgpu_amdkfd_gpuvm_sync_memory(
                struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index b5b940485059..ae767ad7afa2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1249,8 +1249,6 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
        amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
 
        amdgpu_sync_fence(sync, bo_va->last_pt_update);
-
-       kfd_mem_dmaunmap_attachment(mem, entry);
 }
 
 static int update_gpuvm_pte(struct kgd_mem *mem,
@@ -1305,6 +1303,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
 
 update_gpuvm_pte_failed:
        unmap_bo_from_gpuvm(mem, entry, sync);
+       kfd_mem_dmaunmap_attachment(mem, entry);
        return ret;
 }
 
@@ -1910,8 +1909,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                mem->va + bo_size * (1 + mem->aql_queue));
 
        /* Remove from VM internal data structures */
-       list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
+       list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
+               kfd_mem_dmaunmap_attachment(mem, entry);
                kfd_mem_detach(entry);
+       }
 
        ret = unreserve_bo_and_vms(&ctx, false, false);
 
@@ -2085,6 +2086,21 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
        return ret;
 }
 
+void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct amdgpu_vm *vm, struct kgd_mem *mem)
+{
+       struct kfd_mem_attachment *entry;
+
+       mutex_lock(&mem->lock);
+
+       list_for_each_entry(entry, &mem->attachments, list) {
+               if (entry->bo_va->base.vm != vm)
+                       continue;
+               kfd_mem_dmaunmap_attachment(mem, entry);
+       }
+
+       mutex_unlock(&mem->lock);
+}
+
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
 {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 08687ce0aa8b..645628ff1faf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1432,17 +1432,24 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
                        goto sync_memory_failed;
                }
        }
-       mutex_unlock(&p->mutex);
 
-       if (flush_tlb) {
-               /* Flush TLBs after waiting for the page table updates to complete */
-               for (i = 0; i < args->n_devices; i++) {
-                       peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
-                       if (WARN_ON_ONCE(!peer_pdd))
-                               continue;
+       /* Flush TLBs after waiting for the page table updates to complete */
+       for (i = 0; i < args->n_devices; i++) {
+               struct amdgpu_vm *vm;
+
+               peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+               if (WARN_ON_ONCE(!peer_pdd))
+                       continue;
+               if (flush_tlb)
                        kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
-               }
+
+               /* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
+               vm = drm_priv_to_vm(peer_pdd->drm_priv);
+               amdgpu_amdkfd_gpuvm_dmaunmap_mem(vm, mem);
        }
+
+       mutex_unlock(&p->mutex);
+
        kfree(devices_arr);
 
        return 0;
-- 
2.35.1
