From: Dave Airlie <airl...@redhat.com>

This queries the memory size of each NUMA node and stores the
per-node pool limit as 50% of that node's memory. I think we should
eventually drop the limits entirely once we have memcg aware
shrinking, but in the meantime this is more NUMA friendly, and seems
like what people would prefer to happen on NUMA aware systems.
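
As a rough worked example (assuming 4 KiB pages): a node with 32 GiB
of managed memory gets a default limit of (32 GiB >> PAGE_SHIFT) / 2
= 4,194,304 pages, i.e. half the node's pages, rather than a single
global cap shared by all nodes.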

Cc: Christian Koenig <christian.koe...@amd.com>
Signed-off-by: Dave Airlie <airl...@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_pool.c | 57 +++++++++++++++++++++++++---------
 1 file changed, 43 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index c0391ccd179b..6f44654ea743 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -115,10 +115,11 @@ struct ttm_pool_tt_restore {
 
 static unsigned long page_pool_size;
 
-MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
+MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool per NUMA node");
 module_param(page_pool_size, ulong, 0644);
 
-static atomic_long_t allocated_pages;
+static unsigned long pool_node_limit[MAX_NUMNODES];
+static atomic_long_t allocated_pages[MAX_NUMNODES];
 
 static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
 static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
@@ -303,8 +304,8 @@ static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
        rcu_read_lock();
        list_lru_add(&pt->pages, &p->lru, nid, NULL);
        rcu_read_unlock();
-       atomic_long_add(1 << pt->order, &allocated_pages);
 
+       atomic_long_add(num_pages, &allocated_pages[nid]);
        mod_node_page_state(NODE_DATA(nid), NR_GPU_ACTIVE, -num_pages);
        mod_node_page_state(NODE_DATA(nid), NR_GPU_RECLAIM, num_pages);
 }
@@ -330,7 +331,7 @@ static struct page *ttm_pool_type_take(struct ttm_pool_type *pt, int nid)
 
        ret = list_lru_walk_node(&pt->pages, nid, take_one_from_lru, (void *)&p, &nr_to_walk);
        if (ret == 1 && p) {
-               atomic_long_sub(1 << pt->order, &allocated_pages);
+               atomic_long_sub(1 << pt->order, &allocated_pages[nid]);
                mod_node_page_state(NODE_DATA(nid), NR_GPU_ACTIVE, (1 << pt->order));
                mod_node_page_state(NODE_DATA(nid), NR_GPU_RECLAIM, -(1 << pt->order));
        }
@@ -369,7 +370,7 @@ static void ttm_pool_dispose_list(struct ttm_pool_type *pt,
                struct page *p;
                p = list_first_entry(dispose, struct page, lru);
                list_del_init(&p->lru);
-               atomic_long_sub(1 << pt->order, &allocated_pages);
+               atomic_long_sub(1 << pt->order, &allocated_pages[page_to_nid(p)]);
                ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
        }
 }
@@ -927,11 +928,13 @@ int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
  */
 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
 {
+       int nid = ttm_pool_nid(pool);
+
        ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
 
-       while (atomic_long_read(&allocated_pages) > page_pool_size) {
-               unsigned long diff = page_pool_size - atomic_long_read(&allocated_pages);
-               ttm_pool_shrink(ttm_pool_nid(pool), diff);
+       while (atomic_long_read(&allocated_pages[nid]) > pool_node_limit[nid]) {
+               unsigned long diff = pool_node_limit[nid] - atomic_long_read(&allocated_pages[nid]);
+               ttm_pool_shrink(nid, diff);
        }
 }
 EXPORT_SYMBOL(ttm_pool_free);
@@ -1191,7 +1194,7 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
        do
                num_freed += ttm_pool_shrink(sc->nid, sc->nr_to_scan);
        while (num_freed < sc->nr_to_scan &&
-              atomic_long_read(&allocated_pages));
+              atomic_long_read(&allocated_pages[sc->nid]));
 
        sc->nr_scanned = num_freed;
 
@@ -1202,7 +1205,7 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
 static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
                                             struct shrink_control *sc)
 {
-       unsigned long num_pages = atomic_long_read(&allocated_pages);
+       unsigned long num_pages = atomic_long_read(&allocated_pages[sc->nid]);
 
        return num_pages ? num_pages : SHRINK_EMPTY;
 }
@@ -1239,8 +1242,12 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
 /* Dump the total amount of allocated pages */
 static void ttm_pool_debugfs_footer(struct seq_file *m)
 {
-       seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
-                  atomic_long_read(&allocated_pages), page_pool_size);
+       int nid;
+
+       for_each_node(nid) {
+               seq_printf(m, "\ntotal node%d\t: %8lu of %8lu\n", nid,
+                          atomic_long_read(&allocated_pages[nid]), pool_node_limit[nid]);
+       }
 }
 
 /* Dump the information for the global pools */
@@ -1334,6 +1341,22 @@ DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);
 
 #endif
 
+static inline uint64_t ttm_get_node_memory_size(int nid)
+{
+       /* This is directly using si_meminfo_node implementation as the
+        * function is not exported.
+        */
+       int zone_type;
+       uint64_t managed_pages = 0;
+
+       pg_data_t *pgdat = NODE_DATA(nid);
+
+       for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
+               managed_pages +=
+                       zone_managed_pages(&pgdat->node_zones[zone_type]);
+       return managed_pages * PAGE_SIZE;
+}
+
 /**
  * ttm_pool_mgr_init - Initialize globals
  *
@@ -1345,8 +1368,14 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 {
        unsigned int i;
 
-       if (!page_pool_size)
-               page_pool_size = num_pages;
+       int nid;
+       for_each_node(nid) {
+               if (!page_pool_size) {
+                       uint64_t node_size = ttm_get_node_memory_size(nid);
+                       pool_node_limit[nid] = (node_size >> PAGE_SHIFT) / 2;
+               } else
+                       pool_node_limit[nid] = page_pool_size;
+       }
 
        spin_lock_init(&shrinker_lock);
        INIT_LIST_HEAD(&shrinker_list);
-- 
2.49.0