From: Dave Airlie <airl...@redhat.com>

This enables NUMA awareness for the shrinker on the ttm pools.
Instead of freeing a single page at a time from the head of the
global shrinker list, ttm_pool_shrink() now takes a node id and a
number of pages to free, and walks only that node's entries in the
pool type's list_lru.
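
As a rough sketch of what this enables (illustrative only, not part
of the patch): with SHRINKER_NUMA_AWARE set, the core MM invokes the
count/scan callbacks once per node, passing the target node in
sc->nid, which is then threaded through to the per-node list_lru walk:

	/* Illustrative sketch, not part of the patch: shrink node nid. */
	struct shrink_control sc = {
		.gfp_mask   = GFP_KERNEL,
		.nid        = nid,	/* target NUMA node */
		.nr_to_scan = TTM_SHRINKER_BATCH,
	};
	/*
	 * The scan callback forwards sc->nid and sc->nr_to_scan to
	 * ttm_pool_shrink(), which walks only node nid's entries in
	 * the pool type's list_lru.
	 */
	unsigned long freed = ttm_pool_shrinker_scan(mm_shrinker, &sc);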

Cc: Christian Koenig <christian.koe...@amd.com>
Cc: Dave Chinner <da...@fromorbit.com>
Signed-off-by: Dave Airlie <airl...@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_pool.c | 38 +++++++++++++++++++---------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index e26d57676fe3..c0391ccd179b 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -415,12 +415,12 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
        return NULL;
 }
 
-/* Free pages using the global shrinker list */
-static unsigned int ttm_pool_shrink(void)
+/* Free pages using the per-node shrinker list */
+static unsigned int ttm_pool_shrink(int nid, unsigned long num_to_free)
 {
+       LIST_HEAD(dispose);
        struct ttm_pool_type *pt;
        unsigned int num_pages;
-       struct page *p;
 
        down_read(&pool_shrink_rwsem);
        spin_lock(&shrinker_lock);
@@ -428,13 +428,10 @@ static unsigned int ttm_pool_shrink(void)
        list_move_tail(&pt->shrinker_list, &shrinker_list);
        spin_unlock(&shrinker_lock);
 
-       p = ttm_pool_type_take(pt, ttm_pool_nid(pt->pool));
-       if (p) {
-               ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
-               num_pages = 1 << pt->order;
-       } else {
-               num_pages = 0;
-       }
+       num_pages = list_lru_walk_node(&pt->pages, nid, pool_move_to_dispose_list, &dispose, &num_to_free);
+       num_pages *= 1 << pt->order;
+
+       ttm_pool_dispose_list(pt, &dispose);
        up_read(&pool_shrink_rwsem);
 
        return num_pages;
@@ -783,6 +780,7 @@ static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
                pt = ttm_pool_select_type(pool, page_caching, order);
                if (pt && allow_pools)
                        p = ttm_pool_type_take(pt, ttm_pool_nid(pool));
+
                /*
                 * If that fails or previously failed, allocate from system.
                 * Note that this also disallows additional pool allocations using
@@ -931,8 +929,10 @@ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
 {
        ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
 
-       while (atomic_long_read(&allocated_pages) > page_pool_size)
-               ttm_pool_shrink();
+       while (atomic_long_read(&allocated_pages) > page_pool_size) {
+               unsigned long diff = atomic_long_read(&allocated_pages) - page_pool_size;
+               ttm_pool_shrink(ttm_pool_nid(pool), diff);
+       }
 }
 EXPORT_SYMBOL(ttm_pool_free);
 
@@ -1189,7 +1189,7 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
        unsigned long num_freed = 0;
 
        do
-               num_freed += ttm_pool_shrink();
+               num_freed += ttm_pool_shrink(sc->nid, sc->nr_to_scan);
        while (num_freed < sc->nr_to_scan &&
               atomic_long_read(&allocated_pages));
 
@@ -1317,11 +1317,15 @@ static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
                .nr_to_scan = TTM_SHRINKER_BATCH,
        };
        unsigned long count;
+       int nid;
 
        fs_reclaim_acquire(GFP_KERNEL);
-       count = ttm_pool_shrinker_count(mm_shrinker, &sc);
-       seq_printf(m, "%lu/%lu\n", count,
-                  ttm_pool_shrinker_scan(mm_shrinker, &sc));
+       for_each_node(nid) {
+               sc.nid = nid;
+               count = ttm_pool_shrinker_count(mm_shrinker, &sc);
+               seq_printf(m, "%d: %lu/%lu\n", nid, count,
+                          ttm_pool_shrinker_scan(mm_shrinker, &sc));
+       }
        fs_reclaim_release(GFP_KERNEL);
 
        return 0;
@@ -1369,7 +1373,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 #endif
 #endif
 
-       mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
+       mm_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "drm-ttm_pool");
        if (!mm_shrinker)
                return -ENOMEM;
 
-- 
2.49.0