On Mon, Jul 14, 2025 at 03:18:17PM +1000, Dave Airlie wrote:
> From: Dave Airlie <airl...@redhat.com>
> 
> This uses the newly introduced per-node GPU tracking stats
> to track GPU memory allocated via TTM and reclaimable memory in
> the TTM page pools.
> 
> These stats will be useful for system information, and later
> when memory cgroups are integrated.
> 
> Cc: Christian Koenig <christian.koe...@amd.com>
> Cc: Matthew Brost <matthew.br...@intel.com>
> Cc: Johannes Weiner <han...@cmpxchg.org>
> Cc: linux...@kvack.org
> Cc: Andrew Morton <a...@linux-foundation.org>
> Signed-off-by: Dave Airlie <airl...@redhat.com>
> 
> ---
> v2: add reclaim parameters and adjust the right counters.
> v3: drop the nid helper and get it from page.
> ---
>  drivers/gpu/drm/ttm/ttm_pool.c | 25 +++++++++++++++++++------
>  1 file changed, 19 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
> index baf27c70a419..ee2344089d47 100644
> --- a/drivers/gpu/drm/ttm/ttm_pool.c
> +++ b/drivers/gpu/drm/ttm/ttm_pool.c
> @@ -150,8 +150,10 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
>  
>       if (!pool->use_dma_alloc) {
>               p = alloc_pages_node(pool->nid, gfp_flags, order);
> -             if (p)
> +             if (p) {
>                       p->private = order;
> +                     mod_node_page_state(NODE_DATA(page_to_nid(p)), NR_GPU_ACTIVE, (1 << order));

Please use mod_lruvec_page_state() here.
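
For reference, a rough sketch of what that substitution could look like in
the allocation path (assuming NR_GPU_ACTIVE stays a node_stat_item the
lruvec helpers accept; if I read the helper right, mod_lruvec_page_state()
derives the node from the page itself and additionally charges the page's
memcg when one is present, falling back to the plain node counter
otherwise):

	if (!pool->use_dma_alloc) {
		p = alloc_pages_node(pool->nid, gfp_flags, order);
		if (p) {
			p->private = order;
			/* account against the page's node and, if charged,
			 * its memcg as well
			 */
			mod_lruvec_page_state(p, NR_GPU_ACTIVE, 1 << order);
		}
		return p;
	}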

> +             }
>               return p;
>       }
>  
> @@ -186,7 +188,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
>  
>  /* Reset the caching and pages of size 1 << order */
>  static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
> -                            unsigned int order, struct page *p)
> +                            unsigned int order, struct page *p, bool reclaim)
>  {
>       unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
>       struct ttm_pool_dma *dma;
> @@ -201,6 +203,9 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
>  #endif
>  
>       if (!pool || !pool->use_dma_alloc) {
> +             mod_node_page_state(NODE_DATA(page_to_nid(p)),
> +                                 reclaim ? NR_GPU_RECLAIM : NR_GPU_ACTIVE,
> +                                 -(1 << order));

Same here.

>               __free_pages(p, order);
>               return;
>       }
> @@ -276,6 +281,7 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
>  static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
>  {
>       unsigned int i, num_pages = 1 << pt->order;
> +     int nid = page_to_nid(p);
>  
>       for (i = 0; i < num_pages; ++i) {
>               if (PageHighMem(p))
> @@ -288,17 +294,24 @@ static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
>       list_add(&p->lru, &pt->pages);
>       spin_unlock(&pt->lock);
>       atomic_long_add(1 << pt->order, &allocated_pages);
> +
> +     mod_node_page_state(NODE_DATA(nid), NR_GPU_ACTIVE, -num_pages);
> +     mod_node_page_state(NODE_DATA(nid), NR_GPU_RECLAIM, num_pages);

Same here.

>  }
>  
>  /* Take pages from a specific pool_type, return NULL when nothing available */
>  static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
>  {
>       struct page *p;
> +     int nid;
>  
>       spin_lock(&pt->lock);
>       p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
>       if (p) {
> +             nid = page_to_nid(p);
>               atomic_long_sub(1 << pt->order, &allocated_pages);
> +             mod_node_page_state(NODE_DATA(nid), NR_GPU_ACTIVE, (1 << pt->order));
> +             mod_node_page_state(NODE_DATA(nid), NR_GPU_RECLAIM, -(1 << pt->order));

Same here.
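
Here the page moves between the two counters, so the equivalent sketch
would be a paired update (same assumption about the stat items; note the
nid/NODE_DATA() dance also becomes unnecessary, since the helper takes
the page directly):

	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		/* the page leaves the pool: move reclaimable -> active */
		mod_lruvec_page_state(p, NR_GPU_ACTIVE, 1 << pt->order);
		mod_lruvec_page_state(p, NR_GPU_RECLAIM, -(1 << pt->order));
	}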
