

From: Chen, Xiaogang <xiaogang.c...@amd.com>
Sent: Tuesday, January 7, 2025 8:20 AM
To: Yang, Philip <philip.y...@amd.com>; Deng, Emily <emily.d...@amd.com>; 
amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdkfd: Fix partial migrate issue



On 1/6/2025 5:50 PM, Philip Yang wrote:


On 2025-01-02 19:06, Emily Deng wrote:

For a partial migrate from RAM to VRAM, migrate->cpages is not equal to
migrate->npages, so migrate->npages must be used to check all the pages that
need to be migrated, whether they could be copied or not.

Only the pages that can actually be migrated should be set in
migrate->dst[i]; otherwise migrate_vma_pages() will migrate the wrong pages
based on migrate->dst[i].
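
For reference, a minimal sketch of the migrate_vma contract this fix relies
on, based on the documented behavior of migrate_vma_setup() and
migrate_vma_pages(); device_pfn_for() is a made-up placeholder, not a real
kfd helper:

	unsigned long i;
	int r;

	r = migrate_vma_setup(&migrate);
	if (r)
		return r;

	/* after setup, cpages <= npages: only collected entries carry
	 * MIGRATE_PFN_MIGRATE, and only those may be given a dst entry */
	for (i = 0; i < migrate.npages; i++) {
		if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE)) {
			migrate.dst[i] = 0;	/* leave uncollected slots empty */
			continue;
		}
		migrate.dst[i] = migrate_pfn(device_pfn_for(i));
	}
	migrate_vma_pages(&migrate);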



Signed-off-by: Emily Deng <emily.d...@amd.com>

---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 4b275937d05e..5c96c2d425e3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -278,7 +278,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
 			 struct migrate_vma *migrate, struct dma_fence **mfence,
 			 dma_addr_t *scratch, uint64_t ttm_res_offset)
 {
-	uint64_t npages = migrate->cpages;
+	uint64_t npages = migrate->npages;

As the partial migration size is based on prange granularity, 2MB by default, 
migrate->cpages may always have been equal to migrate->npages, which is why we 
never triggered this bug. I am wondering how you caught it. This bug also leaks 
svm_bo, as the svm_migrate_get_vram_page reference count becomes incorrect.
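
For context, a simplified sketch of the reference that leaks; the helper names
follow the kfd code, but the body here is illustrative rather than quoted:

	static void svm_migrate_get_vram_page(struct svm_range *prange,
					      unsigned long pfn)
	{
		struct page *page = pfn_to_page(pfn);

		/* one svm_bo reference per dst page handed to migrate */
		svm_range_bo_ref(prange->svm_bo);
		page->zone_device_data = prange->svm_bo;
		zone_device_page_init(page);
	}

If migrate->dst[i] is populated for a page that was never collected,
migrate_vma_pages() skips it, the page free path never runs, and that svm_bo
reference is never dropped.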

The commit message is somewhat confusing. This is not partial migration from 
page fault recovery. It is the case where migrate->cpages != migrate->npages 
after migrate_vma_setup because some pages cannot be moved for various reasons, 
so only part of the src pages can be migrated, e.g. some system RAM pages are 
locked by the kernel. Usually this case does not happen during normal tests.
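
A sketch of where the mismatch shows up, assuming the usual calling sequence;
the pr_debug wording is illustrative:

	r = migrate_vma_setup(&migrate);
	if (r)
		return r;

	/* npages covers the whole requested range; cpages counts only the
	 * pages migrate_vma_setup() actually collected and locked */
	if (migrate.cpages != migrate.npages)
		pr_debug("partial migration, 0x%lx/0x%lx pages collected\n",
			 migrate.cpages, migrate.npages);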

Regards

Xiaogang

You are right, the issue happens when some system RAM pages have been swapped 
out and cannot be migrated. But that is only one cause; there are many reasons 
why migrate->cpages != migrate->npages. In any case it is not a full migrate, 
only some of the pages are migrated, so I think "partial migrate" is the more 
suitable term.

 	struct amdgpu_device *adev = node->adev;
 	struct device *dev = adev->dev;
 	struct amdgpu_res_cursor cursor;
@@ -299,9 +299,6 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
 		struct page *spage;

 		dst[i] = cursor.start + (j << PAGE_SHIFT);
-		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
-		svm_migrate_get_vram_page(prange, migrate->dst[i]);
-		migrate->dst[i] = migrate_pfn(migrate->dst[i]);

 		spage = migrate_pfn_to_page(migrate->src[i]);
 		if (spage && !is_zone_device_page(spage)) {
@@ -345,6 +342,9 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
 		} else {
 			j++;
 		}
+		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+		svm_migrate_get_vram_page(prange, migrate->dst[i]);
+		migrate->dst[i] = migrate_pfn(migrate->dst[i]);

These lines should be moved earlier, to handle the corner case of migrating 
one page into the last page of the VRAM res cursor.

Please check this change, which adds mpages accounting to break out of the 
loop earlier:

-       uint64_t npages = migrate->cpages;
+       uint64_t npages = migrate->npages;
        struct amdgpu_device *adev = node->adev;
        struct device *dev = adev->dev;
        struct amdgpu_res_cursor cursor;
+       uint64_t mpages = 0;
        dma_addr_t *src;
        uint64_t *dst;
        uint64_t i, j;
@@ -295,14 +296,9 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,

        amdgpu_res_first(prange->ttm_res, ttm_res_offset,
                         npages << PAGE_SHIFT, &cursor);
-       for (i = j = 0; i < npages; i++) {
+       for (i = j = 0; i < npages && mpages < migrate->cpages; i++) {
                struct page *spage;

-               dst[i] = cursor.start + (j << PAGE_SHIFT);
-               migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
-               svm_migrate_get_vram_page(prange, migrate->dst[i]);
-               migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-
                spage = migrate_pfn_to_page(migrate->src[i]);
                if (spage && !is_zone_device_page(spage)) {
                        src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
@@ -322,6 +318,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
                                                mfence);
                                if (r)
                                        goto out_free_vram_pages;
+                               mpages += j;
                                amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
                                j = 0;
                        } else {
@@ -333,6 +330,11 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
                pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
                                     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

+               dst[i] = cursor.start + (j << PAGE_SHIFT);
+               migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+               svm_migrate_get_vram_page(prange, migrate->dst[i]);
+               migrate->dst[i] = migrate_pfn(migrate->dst[i]);
+
                if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
                        r = svm_migrate_copy_memory_gart(adev, src + i - j,
                                                         dst + i - j, j + 1,
@@ -340,6 +342,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
                                                         mfence);
                        if (r)
                                goto out_free_vram_pages;
+                       mpages += j + 1;
                        amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
                        j = 0;
                } else {
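
One way to sanity-check the accounting after the final flush, as a sketch on
top of the change above; it assumes mpages is also bumped after the trailing
svm_migrate_copy_memory_gart() call, and the WARN_ONCE wording is illustrative:

	/* on a clean exit, every collected page must have been copied */
	WARN_ONCE(mpages != migrate->cpages,
		  "partial migrate copied 0x%llx of 0x%lx collected pages\n",
		  mpages, migrate->cpages);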

 	}

 	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
