GPU page tables are invalidated by unmapping the prange directly in the
MMU notifier when page fault retry is enabled through the amdgpu_noretry
global parameter. The page tables are then restored in the GPU page
fault handler.

If XNACK is on, we update the GPU mappings after migration to avoid
unnecessary GPUVM faults.

Signed-off-by: Alex Sierra <alex.sie...@amd.com>
Signed-off-by: Philip Yang <philip.y...@amd.com>
Signed-off-by: Felix Kuehling <felix.kuehl...@amd.com>
---
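Note for reviewers: below is a rough pseudo-C sketch of the control flow
this patch sets up, pieced together from the hunks that follow (the
deferred-work plumbing and error handling are elided, so treat it as an
illustration rather than the exact code):

	/* MMU notifier invalidation of a prange, see svm_range_evict() */
	if (!p->xnack_enabled) {
		/* no retry faults: stop the user queues, then restore
		 * and map all ranges from the delayed restore work
		 */
		kgd2kfd_quiesce_mm(mm);
		schedule_delayed_work(&svms->restore_work,
			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
	} else {
		/* retry faults on: only unmap; the GPU page fault
		 * handler restores the mapping on the next access
		 */
		svm_range_unmap_from_gpus(prange, start, last);
	}

	/* With XNACK on, migration upgrades the deferred list op so
	 * svm_range_handle_list_op() also calls svm_range_map_to_gpus()
	 * and the first access after migration does not fault:
	 *   SVM_OP_UPDATE_RANGE_NOTIFIER -> SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP
	 *   SVM_OP_ADD_RANGE             -> SVM_OP_ADD_RANGE_AND_MAP
	 */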
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c |  6 +-
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c     | 71 +++++++++++++++++++-----
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h     |  4 +-
 3 files changed, 64 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 1243cf02f872..8ce3ff56a0ce 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -789,7 +789,11 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
        list_for_each_entry_safe(prange, next, &update_list, update_list) {
                enum svm_work_list_ops op;
 
-               op = SVM_OP_UPDATE_RANGE_NOTIFIER;
+               /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
+               if (p->xnack_enabled && prange == pmigrate)
+                       op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
+               else
+                       op = SVM_OP_UPDATE_RANGE_NOTIFIER;
 
                svm_range_add_list_work(&p->svms, prange, mm, op);
                list_del_init(&prange->update_list);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 69241ed4a377..fb8ca844d9bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1006,6 +1006,13 @@ svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
 
        *pmigrate = new;
 
+       /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
+       if (p->xnack_enabled && (*pmigrate)->work_item.op == SVM_OP_ADD_RANGE) {
+               (*pmigrate)->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
+               pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
+                        *pmigrate, (*pmigrate)->start, (*pmigrate)->last,
+                        SVM_OP_ADD_RANGE_AND_MAP);
+       }
        return 0;
 }
 
@@ -1407,25 +1414,38 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
                unsigned long start, unsigned long last)
 {
        struct svm_range_list *svms = prange->svms;
-       int invalid, evicted_ranges;
+       struct kfd_process *p;
        int r = 0;
 
-       invalid = atomic_inc_return(&prange->invalid);
-       evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
-       if (evicted_ranges != 1)
-               return r;
+       p = container_of(svms, struct kfd_process, svms);
 
-       pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
-                prange->svms, prange->start, prange->last);
+       pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
+                svms, prange->start, prange->last, start, last);
 
-       /* First eviction, stop the queues */
-       r = kgd2kfd_quiesce_mm(mm);
-       if (r)
-               pr_debug("failed to quiesce KFD\n");
+       if (!p->xnack_enabled) {
+               int invalid, evicted_ranges;
+
+               invalid = atomic_inc_return(&prange->invalid);
+               evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
+               if (evicted_ranges != 1)
+                       return r;
 
-       pr_debug("schedule to restore svm %p ranges\n", svms);
-       schedule_delayed_work(&svms->restore_work,
-               msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
+               pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
+                        prange->svms, prange->start, prange->last);
+
+               /* First eviction, stop the queues */
+               r = kgd2kfd_quiesce_mm(mm);
+               if (r)
+                       pr_debug("failed to quiesce KFD\n");
+
+               pr_debug("schedule to restore svm %p ranges\n", svms);
+               schedule_delayed_work(&svms->restore_work,
+                       msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
+       } else {
+               pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
+                        prange->svms, start, last);
+               svm_range_unmap_from_gpus(prange, start, last);
+       }
 
        return r;
 }
@@ -1621,6 +1641,7 @@ static void
 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
 {
        struct mm_struct *mm = prange->work_item.mm;
+       int r;
 
        switch (prange->work_item.op) {
        case SVM_OP_NULL:
@@ -1639,12 +1660,32 @@ svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
                         svms, prange, prange->start, prange->last);
                svm_range_update_notifier_and_interval_tree(mm, prange);
                break;
+       case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
+               pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
+                        svms, prange, prange->start, prange->last);
+               svm_range_update_notifier_and_interval_tree(mm, prange);
+
+               r = svm_range_map_to_gpus(prange, true);
+               if (r)
+                       pr_debug("failed %d map 0x%p [0x%lx 0x%lx]\n",
+                                r, svms, prange->start, prange->last);
+               break;
        case SVM_OP_ADD_RANGE:
                pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
                         prange->start, prange->last);
                svm_range_add_to_svms(prange);
                svm_range_add_notifier_locked(mm, prange);
                break;
+       case SVM_OP_ADD_RANGE_AND_MAP:
+               pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
+                        prange, prange->start, prange->last);
+               svm_range_add_to_svms(prange);
+               svm_range_add_notifier_locked(mm, prange);
+               r = svm_range_map_to_gpus(prange, true);
+               if (r)
+                       pr_debug("failed %d map 0x%p [0x%lx 0x%lx]\n",
+                                r, svms, prange->start, prange->last);
+               break;
        default:
                WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
                         prange->work_item.op);
@@ -2235,7 +2276,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
                if (r)
                        goto out_unlock_range;
 
-               if (migrated) {
+               if (migrated && !p->xnack_enabled) {
                        pr_debug("restore_work will update mappings of GPUs\n");
                        mutex_unlock(&prange->migrate_mutex);
                        continue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index e6b737889bb3..3f945a601546 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -43,7 +43,9 @@ enum svm_work_list_ops {
        SVM_OP_NULL,
        SVM_OP_UNMAP_RANGE,
        SVM_OP_UPDATE_RANGE_NOTIFIER,
-       SVM_OP_ADD_RANGE
+       SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP,
+       SVM_OP_ADD_RANGE,
+       SVM_OP_ADD_RANGE_AND_MAP
 };
 
 struct svm_work_list_item {
-- 
2.31.0
