From: Philip Yang <philip.y...@amd.com>

Use HMM to get the addresses of system memory pages, which will be used
to map the range to GPUs or to migrate it to VRAM.
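
For context, amdgpu_hmm_range_get_pages() is expected to implement the
usual hmm_range_fault() retry loop from include/linux/hmm.h. The sketch
below is illustrative only, not the actual amdgpu helper:
get_system_pages() is a made-up name and error handling is abbreviated.

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static int get_system_pages(struct mmu_interval_notifier *notifier,
			    struct mm_struct *mm, unsigned long start,
			    unsigned long npages, unsigned long *pfns)
{
	struct hmm_range range = {
		.notifier = notifier,
		.start = start,
		.end = start + (npages << PAGE_SHIFT),
		.hmm_pfns = pfns,
		.default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
	};
	int r;

	do {
		/* Snapshot the notifier sequence before walking page tables */
		range.notifier_seq = mmu_interval_read_begin(notifier);
		mmap_read_lock(mm);
		r = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		/* -EBUSY: a concurrent CPU invalidation raced us, try again */
	} while (r == -EBUSY);

	if (r)
		return r;

	/*
	 * The pfns are only valid as long as mmu_interval_read_retry()
	 * returns false; checking that is left to the caller, which is
	 * what amdgpu_hmm_range_get_pages_done() is expected to do.
	 */
	return 0;
}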

Signed-off-by: Philip Yang <philip.y...@amd.com>
Signed-off-by: Felix Kuehling <felix.kuehl...@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 103 ++++++++++++++++++++++++++-
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h |   4 ++
 2 files changed, 106 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index e57103a9025e..6024caf7373f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -28,6 +28,15 @@
 #include "kfd_priv.h"
 #include "kfd_svm.h"
 
+static bool
+svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
+                                   const struct mmu_notifier_range *range,
+                                   unsigned long cur_seq);
+
+static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
+       .invalidate = svm_range_cpu_invalidate_pagetables,
+};
+
 /**
  * svm_range_unlink - unlink svm_range from lists and interval tree
  * @prange: svm range structure to be removed
@@ -46,6 +55,18 @@ static void svm_range_unlink(struct svm_range *prange)
                interval_tree_remove(&prange->it_node, &prange->svms->objects);
 }
 
+static void
+svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
+{
+       pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
+                prange, prange->start, prange->last);
+
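+       /*
+        * mmu_interval_notifier_insert_locked() expects the caller to
+        * already hold mm->mmap_lock for write.
+        */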
+       mmu_interval_notifier_insert_locked(&prange->notifier, mm,
+                                    prange->start << PAGE_SHIFT,
+                                    prange->npages << PAGE_SHIFT,
+                                    &svm_range_mn_ops);
+}
+
 /**
  * svm_range_add_to_svms - add svm range to svms
  * @prange: svm range structure to be added
@@ -65,6 +86,18 @@ static void svm_range_add_to_svms(struct svm_range *prange)
        interval_tree_insert(&prange->it_node, &prange->svms->objects);
 }
 
+static void svm_range_remove_notifier(struct svm_range *prange)
+{
+       pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
+                prange->svms, prange,
+                prange->notifier.interval_tree.start >> PAGE_SHIFT,
+                prange->notifier.interval_tree.last >> PAGE_SHIFT);
+
+       if (prange->notifier.interval_tree.start != 0 &&
+           prange->notifier.interval_tree.last != 0)
+               mmu_interval_notifier_remove(&prange->notifier);
+}
+
 static void svm_range_free(struct svm_range *prange)
 {
        pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
@@ -112,6 +145,56 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
        return prange;
 }
 
+/**
+ * svm_range_validate_ram - get system memory pages of svm range
+ *
+ * @mm: the mm_struct of process
+ * @prange: the range struct
+ *
+ * After system memory is mapped to the GPU, it may be invalidated at any
+ * time while the application is running. The HMM callback is used to keep
+ * the GPU in sync with CPU page table updates, so there is no need to take
+ * a lock to prevent CPU invalidation, nor to check the return value of
+ * hmm_range_get_pages_done.
+ *
+ * Return:
+ * 0 - OK, otherwise error code
+ */
+static int
+svm_range_validate_ram(struct mm_struct *mm, struct svm_range *prange)
+{
+       int r;
+
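+       /* Fault in the pages and take an HMM snapshot of their addresses */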
+       r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
+                                      prange->start << PAGE_SHIFT,
+                                      prange->npages, &prange->hmm_range,
+                                      false, true);
+       if (r) {
+               pr_debug("failed %d to get svm range pages\n", r);
+               return r;
+       }
+
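+       /*
+        * Hand the pfn array over to the prange. Clearing hmm_pfns keeps
+        * amdgpu_hmm_range_get_pages_done() below from freeing it.
+        */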
+       kvfree(prange->pages_addr);
+       prange->pages_addr = prange->hmm_range->hmm_pfns;
+       prange->hmm_range->hmm_pfns = NULL;
+
+       amdgpu_hmm_range_get_pages_done(prange->hmm_range);
+       prange->hmm_range = NULL;
+
+       return 0;
+}
+
+static int
+svm_range_validate(struct mm_struct *mm, struct svm_range *prange)
+{
+       int r = 0;
+
+       pr_debug("actual loc 0x%x\n", prange->actual_loc);
+
+       r = svm_range_validate_ram(mm, prange);
+
+       return r;
+}
+
 static int
 svm_range_check_attr(struct kfd_process *p,
                     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
@@ -581,6 +664,18 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
        return r;
 }
 
+/**
+ * svm_range_cpu_invalidate_pagetables - interval notifier callback
+ *
+ * Stub for now: it only acknowledges the CPU invalidation. Invalidating
+ * the corresponding GPU mappings is expected to come in a later patch.
+ */
+static bool
+svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
+                                   const struct mmu_notifier_range *range,
+                                   unsigned long cur_seq)
+{
+       return true;
+}
+
 void svm_range_list_fini(struct kfd_process *p)
 {
        pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
@@ -732,6 +827,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
        /* Apply changes as a transaction */
        list_for_each_entry_safe(prange, next, &insert_list, insert_list) {
                svm_range_add_to_svms(prange);
+               svm_range_add_notifier_locked(mm, prange);
        }
        list_for_each_entry(prange, &update_list, update_list) {
                svm_range_apply_attrs(p, prange, nattr, attrs);
@@ -743,6 +839,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
                         prange->svms, prange, prange->start,
                         prange->last);
                svm_range_unlink(prange);
+               svm_range_remove_notifier(prange);
                svm_range_free(prange);
        }
 
@@ -753,7 +850,11 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
         * case because the rollback wouldn't be guaranteed to work either.
         */
        list_for_each_entry(prange, &update_list, update_list) {
-               /* TODO */
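+               /* Get the current system pages backing the range via HMM */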
+               r = svm_range_validate(mm, prange);
+               if (r) {
+                       pr_debug("failed %d to validate svm range\n", r);
+                       break;
+               }
        }
 
        svm_range_debug_dump(svms);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index c46cdad9d1a3..f35a178c607f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -44,6 +44,7 @@
  * @update_list:link list node used to add to update_list
  * @remove_list:link list node used to add to remove list
  * @insert_list:link list node used to add to insert list
+ * @hmm_range:  hmm range structure used by hmm_range_fault to get system pages
  * @npages:     number of pages
  * @pages_addr: list of system memory physical page address
  * @flags:      flags defined as KFD_IOCTL_SVM_FLAG_*
@@ -51,6 +52,7 @@
 * @prefetch_loc: last prefetch location, 0 for CPU, or GPU id
  * @actual_loc: the actual location, 0 for CPU, or GPU id
  * @granularity:migration granularity, log2 num pages
+ * @notifier:   mmu interval notifier used to receive CPU page table invalidations
  * @bitmap_access: index bitmap of GPUs which can access the range
  * @bitmap_aip: index bitmap of GPUs which can access the range in place
  *
@@ -67,6 +69,7 @@ struct svm_range {
        struct list_head                update_list;
        struct list_head                remove_list;
        struct list_head                insert_list;
+       struct hmm_range                *hmm_range;
        uint64_t                        npages;
        unsigned long                   *pages_addr;
        uint32_t                        flags;
@@ -74,6 +77,7 @@ struct svm_range {
        uint32_t                        prefetch_loc;
        uint32_t                        actual_loc;
        uint8_t                         granularity;
+       struct mmu_interval_notifier    notifier;
        DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
        DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
 };
-- 
2.31.0
