Am 2021-05-06 um 11:36 p.m. schrieb Alex Sierra:
> [Why]
> svm ranges can have mixed pages from device or system memory.
> A good example is, after a prange has been allocated in VRAM and a
> copy-on-write is triggered by a fork. This invalidates some pages
> inside the prange, ending up in mixed pages.
>
> [How]
> By classifying each page inside a prange, based on its type. Device or
> system memory, during dma mapping call. If page corresponds
> to VRAM domain, a flag is set to its dma_addr entry for each GPU.
> Then, at GPU page table mapping time, all groups of contiguous pages
> of the same type are mapped with their proper pte flags.
>
> Signed-off-by: Alex Sierra <alex.sie...@amd.com>
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 51 ++++++++++++++++++----------
>  drivers/gpu/drm/amd/amdkfd/kfd_svm.h |  1 +
>  2 files changed, 35 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c 
> b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index 940165f4437d..1440e4e555f1 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -141,6 +141,12 @@ svm_range_dma_map_dev(struct device *dev, dma_addr_t 
> **dma_addr,
>                       dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
>  
>               page = hmm_pfn_to_page(hmm_pfns[i]);
> +             if (is_zone_device_page(page)) {
> +                     addr[i] = hmm_pfns[i] << PAGE_SHIFT;
> +                     addr[i] |= SVM_RANGE_VRAM_DOMAIN;
> +                     pr_debug("vram address detected: 0x%llx\n", addr[i] >> 
> PAGE_SHIFT);
> +                     continue;
> +             }
>               addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
>               r = dma_mapping_error(dev, addr[i]);
>               if (r) {
> @@ -1131,32 +1137,43 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, 
> struct amdgpu_vm *vm,
>                    struct amdgpu_device *bo_adev, struct dma_fence **fence)
>  {
>       struct amdgpu_bo_va bo_va;
> +     struct ttm_resource *ttm_res;
>       uint64_t pte_flags;
> +     unsigned long last_start;
> +     int last_domain;
>       int r = 0;
> +     int64_t i;
>  
>       pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
>                prange->last);
>  
> -     if (prange->svm_bo && prange->ttm_res) {
> +     if (prange->svm_bo && prange->ttm_res)
>               bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
> -             prange->mapping.bo_va = &bo_va;
> -     }
>  
> -     prange->mapping.start = prange->start;
> -     prange->mapping.last = prange->last;
> -     prange->mapping.offset = prange->offset;
> -     pte_flags = svm_range_get_pte_flags(adev, prange);
> +     last_start = prange->start;
> +     for (i = 0; i < prange->npages; i++) {
> +             last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
> +             if ((prange->start + i) < prange->last &&
> +                 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
> +                     continue;
>  
> -     r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
> -                                     prange->mapping.start,
> -                                     prange->mapping.last, pte_flags,
> -                                     prange->mapping.offset,
> -                                     prange->ttm_res ?
> -                                             prange->ttm_res->mm_node : NULL,
> -                                     dma_addr, &vm->last_update);
> -     if (r) {
> -             pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
> -             goto out;
> +             pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
> +                      last_start, prange->start + i, last_domain ? "GPU" : 
> "CPU");
> +             ttm_res = last_domain ? prange->ttm_res : NULL;
> +             pte_flags = svm_range_get_pte_flags(adev, prange);
> +             r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, 
> false, NULL,
> +                                             last_start,
> +                                             prange->start + i, pte_flags,
> +                                             prange->offset +
> +                                             ((last_start - prange->start) 
> << PAGE_SHIFT),
> +                                             ttm_res ? ttm_res->mm_node : 
> NULL,
> +                                             ttm_res ? NULL : dma_addr,
> +                                             &vm->last_update);
> +             if (r) {
> +                     pr_debug("failed %d to map to gpu 0x%lx\n", r, 
> prange->start);
> +                     goto out;
> +             }
> +             last_start += prange->start + i + 1;

This looks wrong. It's either

    last_start += i + 1;

or

    last_start = prange->start + i + 1;

With that fixed, the series is
Reviewed-by: Felix Kuehling <felix.kuehl...@amd.com>


>       }
>  
>       r = amdgpu_vm_update_pdes(adev, vm, false);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h 
> b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
> index 08542fe39303..e68aa51322df 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
> @@ -35,6 +35,7 @@
>  #include "amdgpu.h"
>  #include "kfd_priv.h"
>  
> +#define SVM_RANGE_VRAM_DOMAIN (1UL << 0)
>  #define SVM_ADEV_PGMAP_OWNER(adev)\
>                       ((adev)->hive ? (void *)(adev)->hive : (void *)(adev))
>  
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to