On Saturday, 2024-10-05 at 03:42 +0800, Sui Jingfeng wrote:
> Since the GPU VA space is compact in terms of 4KiB units, mapping and/or
> unmapping an area that doesn't belong to a context breaks the philosophy
> of PPAS. That results in severe errors such as GPU hangs and MMU faults
> (page not present).
> 
> Shrink the usable size of an etnaviv GEM buffer object to its user size,
> instead of the full physical size of its backing memory.
> 
> Signed-off-by: Sui Jingfeng <sui.jingf...@linux.dev>
> ---
>  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 28 +++++++++------------------
>  1 file changed, 9 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> index 6fbc62772d85..a52ec5eb0e3d 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> @@ -70,8 +70,10 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
>  }
>  
>  static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
> +                          unsigned int user_len,
>                            struct sg_table *sgt, int prot)
> -{    struct scatterlist *sg;
> +{
> +     struct scatterlist *sg;
>       unsigned int da = iova;
>       unsigned int i;
>       int ret;
> @@ -81,7 +83,8 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
>  
>       for_each_sgtable_dma_sg(sgt, sg, i) {
>               phys_addr_t pa = sg_dma_address(sg) - sg->offset;
> -             size_t bytes = sg_dma_len(sg) + sg->offset;
> +             unsigned int phys_len = sg_dma_len(sg) + sg->offset;
> +             size_t bytes = MIN(phys_len, user_len);
>  
>               VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
>  
> @@ -89,6 +92,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
>               if (ret)
>                       goto fail;
>  
> +             user_len -= bytes;
>               da += bytes;
>       }

Since MIN(phys_len, user_len) may limit the mapped amount in the wrong
direction, I think it would be good to add a WARN_ON(user_len != 0)
after the DMA SG iteration.
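
Something like this at the end of the mapping loop (just a sketch
against the hunk above, reusing your user_len variable):

	for_each_sgtable_dma_sg(sgt, sg, i) {
		...
		user_len -= bytes;
		da += bytes;
	}

	/*
	 * The whole user-visible size must have been consumed by the
	 * SG entries; anything left over means the MIN() above
	 * truncated in the wrong direction.
	 */
	WARN_ON(user_len != 0);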

>  
> @@ -104,21 +108,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
>  static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
>                               struct sg_table *sgt, unsigned len)
>  {
> -     struct scatterlist *sg;
> -     unsigned int da = iova;
> -     int i;
> -
> -     for_each_sgtable_dma_sg(sgt, sg, i) {
> -             size_t bytes = sg_dma_len(sg) + sg->offset;
> -
> -             etnaviv_context_unmap(context, da, bytes);
> -
> -             VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
> -
> -             BUG_ON(!PAGE_ALIGNED(bytes));
> -
> -             da += bytes;
> -     }
> +     etnaviv_context_unmap(context, iova, len);

This drops some sanity checks, but I have only ever seen them fire when
we had other kernel memory corruption issues, so I'm fine with the
simplification you did here.
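
For the record, the reason the one-call replacement works is that
etnaviv_context_unmap() itself already walks the range in 4K steps and
bails out on unaligned input. From memory (paraphrased, not verbatim
kernel code), it does roughly this:

	static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
					  unsigned long iova, size_t size)
	{
		size_t pgsize = SZ_4K;

		/* Reject ranges that are not page aligned. */
		if (!IS_ALIGNED(iova | size, pgsize))
			return;

		/* Tear down the range one 4K page at a time. */
		while (size) {
			context->global->ops->unmap(context, iova, pgsize);
			iova += pgsize;
			size -= pgsize;
		}
	}

So an alignment sanity check isn't lost entirely, it just lives one
level further down now.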

Regards,
Lucas

>  
>       context->flush_seq++;
>  }
> @@ -131,7 +121,7 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
>       lockdep_assert_held(&context->lock);
>  
>       etnaviv_iommu_unmap(context, mapping->vram_node.start,
> -                         etnaviv_obj->sgt, etnaviv_obj->base.size);
> +                         etnaviv_obj->sgt, etnaviv_obj->user_size);
>       drm_mm_remove_node(&mapping->vram_node);
>  }
>  
> @@ -314,7 +304,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
>               goto unlock;
>  
>       mapping->iova = node->start;
> -     ret = etnaviv_iommu_map(context, node->start, sgt,
> +     ret = etnaviv_iommu_map(context, node->start, user_size, sgt,
>                               ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
>  
>       if (ret < 0) {
