From: Philip Yang <philip.y...@amd.com>

Use HMM to get the addresses of system memory pages, which will be used
to map the pages to GPUs or migrate them to VRAM.

Signed-off-by: Philip Yang <philip.y...@amd.com>
Signed-off-by: Felix Kuehling <felix.kuehl...@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h |  1 +
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c  | 88 +++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h  |  2 +
 3 files changed, 91 insertions(+)
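
Note: amdgpu_hmm_range_get_pages()/amdgpu_hmm_range_get_pages_done() wrap the
core HMM retry pattern described in Documentation/vm/hmm.rst. A minimal sketch
of that pattern follows for reference; driver_populate_range() and driver_lock
are illustrative names, not amdgpu code. svm_range_validate_ram() below relies
on the amdgpu wrappers for the begin/fault part and currently ignores the
retry result, since the notifier callback keeps the GPU in sync.

#include <linux/hmm.h>
#include <linux/mmu_notifier.h>

/* Serializes device page-table updates against the invalidate callback */
static DEFINE_MUTEX(driver_lock);

/*
 * Caller has set range->start, range->end, range->hmm_pfns[] and
 * range->default_flags (e.g. HMM_PFN_REQ_FAULT).
 */
static int driver_populate_range(struct mmu_interval_notifier *mni,
				 struct hmm_range *range)
{
	int ret;

	range->notifier = mni;
again:
	/* Snapshot the notifier sequence before faulting in pages */
	range->notifier_seq = mmu_interval_read_begin(mni);

	mmap_read_lock(mni->mm);
	ret = hmm_range_fault(range);	/* fills range->hmm_pfns[] */
	mmap_read_unlock(mni->mm);
	if (ret) {
		if (ret == -EBUSY)	/* raced with an invalidation */
			goto again;
		return ret;
	}

	mutex_lock(&driver_lock);
	/* The pfns are only valid if no invalidation ran in between */
	if (mmu_interval_read_retry(mni, range->notifier_seq)) {
		mutex_unlock(&driver_lock);
		goto again;
	}

	/* ... program device page tables from range->hmm_pfns[] ... */

	mutex_unlock(&driver_lock);
	return 0;
}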

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index cbb2bae1982d..97cf267b6f51 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -735,6 +735,7 @@ struct svm_range_list {
        struct work_struct              srcu_free_work;
        struct list_head                free_list;
        struct mutex                    free_list_lock;
+       struct mmu_interval_notifier    notifier;
 };
 
 /* Process data */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 017e77e9ae1e..02918faa70d5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -135,6 +135,65 @@ svm_get_supported_dev_by_id(struct kfd_process *p, uint32_t gpu_id,
        return dev;
 }
 
+/**
+ * svm_range_validate_ram - get system memory pages of svm range
+ *
+ * @mm: the mm_struct of the process
+ * @prange: the svm range structure
+ *
+ * After system memory is mapped to the GPU, the CPU may invalidate it at any
+ * time while the application is running. The HMM notifier callback keeps the
+ * GPU in sync with CPU page table updates, so we don't need a lock to prevent
+ * CPU invalidation, nor to check the amdgpu_hmm_range_get_pages_done result.
+ *
+ * Return:
+ * 0 - OK, otherwise error code
+ */
+static int
+svm_range_validate_ram(struct mm_struct *mm, struct svm_range *prange)
+{
+       uint64_t i;
+       int r;
+
+       if (!prange->pages_addr) {
+               prange->pages_addr = kvmalloc_array(prange->npages,
+                                               sizeof(*prange->pages_addr),
+                                               GFP_KERNEL | __GFP_ZERO);
+               if (!prange->pages_addr)
+                       return -ENOMEM;
+       }
+
+       r = amdgpu_hmm_range_get_pages(&prange->svms->notifier, mm, NULL,
+                                      prange->it_node.start << PAGE_SHIFT,
+                                      prange->npages, &prange->hmm_range,
+                                      false, true);
+       if (r) {
+               pr_debug("failed %d to get svm range pages\n", r);
+               return r;
+       }
+
+       for (i = 0; i < prange->npages; i++)
+               prange->pages_addr[i] =
+                       PFN_PHYS(prange->hmm_range->hmm_pfns[i]);
+
+       amdgpu_hmm_range_get_pages_done(prange->hmm_range);
+       prange->hmm_range = NULL;
+
+       return 0;
+}
+
+static int
+svm_range_validate(struct mm_struct *mm, struct svm_range *prange)
+{
+       int r = 0;
+
+       pr_debug("actual loc 0x%x\n", prange->actual_loc);
+
+       r = svm_range_validate_ram(mm, prange);
+
+       return r;
+}
+
 static int
 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
                      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
@@ -349,10 +408,28 @@ static void svm_range_srcu_free_work(struct work_struct *work_struct)
        mutex_unlock(&svms->free_list_lock);
 }
 
+/**
+ * svm_range_cpu_invalidate_pagetables - interval notifier callback
+ * Stub for now; always return true to allow the CPU invalidation to proceed.
+ */
+static bool
+svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
+                                   const struct mmu_notifier_range *range,
+                                   unsigned long cur_seq)
+{
+       return true;
+}
+
+static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
+       .invalidate = svm_range_cpu_invalidate_pagetables,
+};
+
 void svm_range_list_fini(struct kfd_process *p)
 {
        pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
 
+       mmu_interval_notifier_remove(&p->svms.notifier);
+
        /* Ensure srcu free work is finished before process is destroyed */
        flush_work(&p->svms.srcu_free_work);
        cleanup_srcu_struct(&p->svms.srcu);
@@ -375,6 +452,8 @@ int svm_range_list_init(struct kfd_process *p)
        INIT_WORK(&svms->srcu_free_work, svm_range_srcu_free_work);
        INIT_LIST_HEAD(&svms->free_list);
        mutex_init(&svms->free_list_lock);
+       mmu_interval_notifier_insert(&svms->notifier, current->mm, 0, ~1ULL,
+                                    &svm_range_mn_ops);
 
        return 0;
 }
@@ -531,6 +610,15 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
                r = svm_range_apply_attrs(p, prange, nattr, attrs);
                if (r) {
                        pr_debug("failed %d to apply attrs\n", r);
+                       goto out_unlock;
+               }
+
+               r = svm_range_validate(mm, prange);
+               if (r)
+                       pr_debug("failed %d to validate svm range\n", r);
+
+out_unlock:
+               if (r) {
                        mmap_read_unlock(mm);
                        srcu_read_unlock(&prange->svms->srcu, srcu_idx);
                        goto out_remove;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index c7c54fb73dfb..4d394f72eefc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -41,6 +41,7 @@
  * @list:       link list node, used to scan all ranges of svms
  * @update_list:link list node used to add to update_list
  * @remove_list:link list node used to add to remove list
+ * @hmm_range:  HMM range structure used by hmm_range_fault to get system pages
  * @npages:     number of pages
  * @pages_addr: list of system memory physical page address
  * @flags:      flags defined as KFD_IOCTL_SVM_FLAG_*
@@ -61,6 +62,7 @@ struct svm_range {
        struct list_head                list;
        struct list_head                update_list;
        struct list_head                remove_list;
+       struct hmm_range                *hmm_range;
        uint64_t                        npages;
        dma_addr_t                      *pages_addr;
        uint32_t                        flags;
-- 
2.29.2
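
P.S. The svm_range_cpu_invalidate_pagetables() stub above always returns true.
A fleshed-out interval-notifier invalidate callback typically has the shape
below (again per Documentation/vm/hmm.rst; driver_invalidate() and driver_lock
are illustrative names, not amdgpu code): take the same driver lock as the
page-table update path, bump the notifier sequence, and tear down the affected
GPU mappings.

static bool driver_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq)
{
	/* Callers in atomic context may not wait for the driver lock */
	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&driver_lock);
	/* Force concurrent driver_populate_range() callers to retry */
	mmu_interval_set_seq(mni, cur_seq);

	/* ... unmap GPU mappings covering [range->start, range->end) ... */

	mutex_unlock(&driver_lock);
	return true;
}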
