On Fri, 14 Feb 2025 20:55:21 +0000
Adrián Larumbe <adrian.laru...@collabora.com> wrote:

> Commit 434e5ca5b5d7 ("drm/panthor: Expose size of driver internal BO's over
> fdinfo") locks the VMs xarray to avoid UAF errors when the same VM is
> being concurrently destroyed by another thread. However, that puts the
> current thread in atomic context, which means taking the VMs' heap locks
> will trigger a warning, as the thread is no longer allowed to sleep.
> 
> Because replacing the heap mutex with a spinlock isn't feasible in this
> case, the fdinfo handler no longer traverses the list of heaps of every
> VM associated with an open DRM file. Instead, whenever a heap chunk is
> allocated or freed, its size is accumulated into a VM-wide tally, which
> also makes the atomic-context code path somewhat faster.
> 
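Side note for readers following along: xa_lock() boils down to
spin_lock() on the xarray's internal lock, so everything between
xa_lock() and xa_unlock() runs in atomic context, where sleeping
primitives such as mutex_lock() are forbidden. Roughly, that's the old
pattern being removed further down:

	xa_lock(&pfile->vms->xa);            /* spinlock -> atomic context */
	xa_for_each(&pfile->vms->xa, i, vm)
		mutex_lock(&vm->heaps.lock); /* may sleep -> atomic-sleep splat */
	xa_unlock(&pfile->vms->xa);
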
> Signed-off-by: Adrián Larumbe <adrian.laru...@collabora.com>
> Fixes: 3e2c8c718567 ("drm/panthor: Expose size of driver internal BO's over fdinfo")
> ---
>  drivers/gpu/drm/panthor/panthor_heap.c | 38 ++++++++------------------
>  drivers/gpu/drm/panthor/panthor_heap.h |  2 --
>  drivers/gpu/drm/panthor/panthor_mmu.c  | 23 +++++++++++-----
>  drivers/gpu/drm/panthor/panthor_mmu.h  |  1 +
>  4 files changed, 28 insertions(+), 36 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
> index db0285ce5812..e5e5953e4f87 100644
> --- a/drivers/gpu/drm/panthor/panthor_heap.c
> +++ b/drivers/gpu/drm/panthor/panthor_heap.c
> @@ -127,6 +127,8 @@ static void panthor_free_heap_chunk(struct panthor_vm *vm,
>       heap->chunk_count--;
>       mutex_unlock(&heap->lock);
>  
> +     panthor_vm_heaps_size_accumulate(vm, -heap->chunk_size);
> +
>       panthor_kernel_bo_destroy(chunk->bo);
>       kfree(chunk);
>  }
> @@ -180,6 +182,8 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
>       heap->chunk_count++;
>       mutex_unlock(&heap->lock);
>  
> +     panthor_vm_heaps_size_accumulate(vm, heap->chunk_size);
> +
>       return 0;
>  
>  err_destroy_bo:
> @@ -389,6 +393,7 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
>                       removed = chunk;
>                       list_del(&chunk->node);
>                       heap->chunk_count--;
> +                     panthor_vm_heaps_size_accumulate(chunk->bo->vm, -heap->chunk_size);
>                       break;
>               }
>       }
> @@ -560,6 +565,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
>       if (ret)
>               goto err_destroy_pool;
>  
> +     panthor_vm_heaps_size_accumulate(vm, pool->gpu_contexts->obj->size);
> +
>       return pool;
>  
>  err_destroy_pool:
> @@ -594,8 +601,11 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
>       xa_for_each(&pool->xa, i, heap)
>               drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i));
>  
> -     if (!IS_ERR_OR_NULL(pool->gpu_contexts))
> +     if (!IS_ERR_OR_NULL(pool->gpu_contexts)) {
> +             panthor_vm_heaps_size_accumulate(pool->gpu_contexts->vm,
> +                                         -pool->gpu_contexts->obj->size);
>               panthor_kernel_bo_destroy(pool->gpu_contexts);
> +     }
>  
>       /* Reflects the fact the pool has been destroyed. */
>       pool->vm = NULL;
> @@ -603,29 +613,3 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
>  
>       panthor_heap_pool_put(pool);
>  }
> -
> -/**
> - * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool
> - * @pool: Pool whose total chunk size to calculate.
> - *
> - * This function adds the size of all heap chunks across all heaps in the
> - * argument pool. It also adds the size of the gpu contexts kernel bo.
> - * It is meant to be used by fdinfo for displaying the size of internal
> - * driver BO's that aren't exposed to userspace through a GEM handle.
> - *
> - */
> -size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
> -{
> -     struct panthor_heap *heap;
> -     unsigned long i;
> -     size_t size = 0;
> -
> -     down_read(&pool->lock);
> -     xa_for_each(&pool->xa, i, heap)
> -             size += heap->chunk_size * heap->chunk_count;
> -     up_read(&pool->lock);
> -
> -     size += pool->gpu_contexts->obj->size;
> -
> -     return size;
> -}
> diff --git a/drivers/gpu/drm/panthor/panthor_heap.h b/drivers/gpu/drm/panthor/panthor_heap.h
> index e3358d4e8edb..25a5f2bba445 100644
> --- a/drivers/gpu/drm/panthor/panthor_heap.h
> +++ b/drivers/gpu/drm/panthor/panthor_heap.h
> @@ -27,8 +27,6 @@ struct panthor_heap_pool *
>  panthor_heap_pool_get(struct panthor_heap_pool *pool);
>  void panthor_heap_pool_put(struct panthor_heap_pool *pool);
>  
> -size_t panthor_heap_pool_size(struct panthor_heap_pool *pool);
> -
>  int panthor_heap_grow(struct panthor_heap_pool *pool,
>                     u64 heap_gpu_va,
>                     u32 renderpasses_in_flight,
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> index 8c6fc587ddc3..9e48b34fcf80 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> @@ -347,6 +347,14 @@ struct panthor_vm {
>               struct mutex lock;
>       } heaps;
>  
> +     /**
> +      * @fdinfo: VM-wide fdinfo fields.
> +      */
> +     struct {
> +             /** @fdinfo.heaps_size: Size of all chunks across all heaps in the pool. */
> +             atomic_t heaps_size;
> +     } fdinfo;

Feels more like a panthor_heap_pool field to me. If you do that,
you can keep the panthor_heap_pool_size() helper.
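
Something like this, perhaps (untested sketch; the field name and
kernel-doc wording are just a suggestion):

	struct panthor_heap_pool {
		...

		/** @size: Size of all chunks across all heaps in the pool,
		 * plus the gpu_contexts BO. Maintained with
		 * atomic_add()/atomic_sub() at the spots this patch
		 * already touches.
		 */
		atomic_t size;
	};

	size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
	{
		return atomic_read(&pool->size);
	}

That way the accumulation happens on the pool object itself, and the
fdinfo path keeps calling the helper without taking any sleeping lock.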

> +
>       /** @node: Used to insert the VM in the panthor_mmu::vm::list. */
>       struct list_head node;
>  
> @@ -1541,6 +1549,8 @@ static void panthor_vm_destroy(struct panthor_vm *vm)
>       vm->heaps.pool = NULL;
>       mutex_unlock(&vm->heaps.lock);
>  
> +     atomic_set(&vm->fdinfo.heaps_size, 0);
> +

I don't think that's needed: the VM is gone at this point, so there's
no way anyone can query its heaps size afterwards.

>       drm_WARN_ON(&vm->ptdev->base,
>                   panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range));
>       panthor_vm_put(vm);
> @@ -1963,13 +1973,7 @@ void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats
>  
>       xa_lock(&pfile->vms->xa);
>       xa_for_each(&pfile->vms->xa, i, vm) {
> -             size_t size = 0;
> -
> -             mutex_lock(&vm->heaps.lock);
> -             if (vm->heaps.pool)
> -                     size = panthor_heap_pool_size(vm->heaps.pool);
> -             mutex_unlock(&vm->heaps.lock);
> -
> +             size_t size = atomic_read(&vm->fdinfo.heaps_size);
>               stats->resident += size;
>               if (vm->as.id >= 0)
>                       stats->active += size;
> @@ -1977,6 +1981,11 @@ void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats
>       xa_unlock(&pfile->vms->xa);
>  }
>  
> +void panthor_vm_heaps_size_accumulate(struct panthor_vm *vm, ssize_t acc)
> +{
> +     atomic_add(acc, &vm->fdinfo.heaps_size);
> +}

Calling atomic_add() directly would be shorter, and I'd rather see
atomic_sub(size) than atomic_add(-size), so how about we drop this
helper and use atomic_add()/atomic_sub() directly?
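
IOW, something like this at the call sites (sketch using the fields and
functions from this patch):

	/* in panthor_alloc_heap_chunk() */
	atomic_add(heap->chunk_size, &vm->fdinfo.heaps_size);

	/* in panthor_free_heap_chunk() / panthor_heap_return_chunk() */
	atomic_sub(heap->chunk_size, &vm->fdinfo.heaps_size);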

> +
>  static u64 mair_to_memattr(u64 mair, bool coherent)
>  {
>       u64 memattr = 0;
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
> index fc274637114e..29030384eafe 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.h
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.h
> @@ -39,6 +39,7 @@ struct panthor_heap_pool *
>  panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
>  
> void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats);
> +void panthor_vm_heaps_size_accumulate(struct panthor_vm *vm, ssize_t acc);
>  
>  struct panthor_vm *panthor_vm_get(struct panthor_vm *vm);
>  void panthor_vm_put(struct panthor_vm *vm);
