From: Dave Airlie <airl...@redhat.com>

This is an initial port of the TTM pools for write-combined and
uncached pages to use list_lru.

This makes the pools more NUMA-aware and avoids needing separate
NUMA pools (a later commit enables this).

Cc: Christian Koenig <christian.koe...@amd.com>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Dave Chinner <da...@fromorbit.com>
Signed-off-by: Dave Airlie <airl...@redhat.com>
---
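Notes (not part of the commit message):

For reviewers less familiar with list_lru: it keeps one internal list
(with its own lock) per NUMA node, which is what lets a single pool
track pages per node instead of needing one pool per node. Below is a
minimal, hypothetical sketch of that per-node behaviour; example_lru
and example_stash() are invented for illustration and assume
list_lru_init(&example_lru) already ran at init time. The list_lru
calls themselves are the stock <linux/list_lru.h> API on kernels where
list_lru_add() takes an explicit nid/memcg pair, as used by this patch.

#include <linux/gfp.h>
#include <linux/list_lru.h>
#include <linux/mm.h>
#include <linux/printk.h>

static struct list_lru example_lru;

static void example_stash(int nid)
{
        /* Allocate on the requested node so the page's nid matches. */
        struct page *p = alloc_pages_node(nid, GFP_KERNEL, 0);

        if (!p)
                return;

        /* The item lands on the internal list for page_to_nid(p). */
        list_lru_add(&example_lru, &p->lru, page_to_nid(p), NULL);

        /* Counting (and walking) can be confined to a single node. */
        pr_info("node %d now holds %lu entries\n", nid,
                list_lru_count_node(&example_lru, nid));
}
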
 drivers/gpu/drm/ttm/tests/ttm_device_test.c |  2 +-
 drivers/gpu/drm/ttm/tests/ttm_pool_test.c   | 32 ++++----
 drivers/gpu/drm/ttm/ttm_pool.c              | 86 +++++++++++++++------
 include/drm/ttm/ttm_pool.h                  |  2 +-
 4 files changed, 83 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
index 1621903818e5..1f207fd222bc 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_device_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
@@ -183,7 +183,7 @@ static void ttm_device_init_pools(struct kunit *test)
 
                                if (params->use_dma_alloc)
                                        KUNIT_ASSERT_FALSE(test,
-                                                          list_empty(&pt.pages));
+                                                          !list_lru_count(&pt.pages));
                        }
                }
        }
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
index 8ade53371f72..39234a3e98c4 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -248,7 +248,7 @@ static void ttm_pool_alloc_order_caching_match(struct kunit *test)
        pool = ttm_pool_pre_populated(test, size, caching);
 
        pt = &pool->caching[caching].orders[order];
-       KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+       KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt->pages));
 
        tt = ttm_tt_kunit_init(test, 0, caching, size);
        KUNIT_ASSERT_NOT_NULL(test, tt);
@@ -256,7 +256,7 @@ static void ttm_pool_alloc_order_caching_match(struct kunit *test)
        err = ttm_pool_alloc(pool, tt, &simple_ctx);
        KUNIT_ASSERT_EQ(test, err, 0);
 
-       KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+       KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt->pages));
 
        ttm_pool_free(pool, tt);
        ttm_tt_fini(tt);
@@ -282,8 +282,8 @@ static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
        tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
        KUNIT_ASSERT_NOT_NULL(test, tt);
 
-       KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
-       KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+       KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
+       KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt_tt->pages));
 
        err = ttm_pool_alloc(pool, tt, &simple_ctx);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -291,8 +291,8 @@ static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
        ttm_pool_free(pool, tt);
        ttm_tt_fini(tt);
 
-       KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
-       KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+       KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
+       KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_tt->pages));
 
        ttm_pool_fini(pool);
 }
@@ -316,8 +316,8 @@ static void ttm_pool_alloc_order_mismatch(struct kunit *test)
        tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
        KUNIT_ASSERT_NOT_NULL(test, tt);
 
-       KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
-       KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+       KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
+       KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt_tt->pages));
 
        err = ttm_pool_alloc(pool, tt, &simple_ctx);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -325,8 +325,8 @@ static void ttm_pool_alloc_order_mismatch(struct kunit *test)
        ttm_pool_free(pool, tt);
        ttm_tt_fini(tt);
 
-       KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
-       KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+       KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
+       KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_tt->pages));
 
        ttm_pool_fini(pool);
 }
@@ -352,12 +352,12 @@ static void ttm_pool_free_dma_alloc(struct kunit *test)
        ttm_pool_alloc(pool, tt, &simple_ctx);
 
        pt = &pool->caching[caching].orders[order];
-       KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+       KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt->pages));
 
        ttm_pool_free(pool, tt);
        ttm_tt_fini(tt);
 
-       KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+       KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt->pages));
 
        ttm_pool_fini(pool);
 }
@@ -383,12 +383,12 @@ static void ttm_pool_free_no_dma_alloc(struct kunit *test)
        ttm_pool_alloc(pool, tt, &simple_ctx);
 
        pt = &pool->caching[caching].orders[order];
-       KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+       KUNIT_ASSERT_TRUE(test, list_lru_count(&pt->pages) == 1);
 
        ttm_pool_free(pool, tt);
        ttm_tt_fini(tt);
 
-       KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+       KUNIT_ASSERT_TRUE(test, list_lru_count(&pt->pages) == 1);
 
        ttm_pool_fini(pool);
 }
@@ -404,11 +404,11 @@ static void ttm_pool_fini_basic(struct kunit *test)
        pool = ttm_pool_pre_populated(test, size, caching);
        pt = &pool->caching[caching].orders[order];
 
-       KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+       KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt->pages));
 
        ttm_pool_fini(pool);
 
-       KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+       KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt->pages));
 }
 
 static struct kunit_case ttm_pool_test_cases[] = {
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 99197aac09a1..785b141d18df 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -130,6 +130,18 @@ static struct list_head shrinker_list;
 static struct shrinker *mm_shrinker;
 static DECLARE_RWSEM(pool_shrink_rwsem);
 
+/* Helper to get a currently valid node id from a pool */
+static int ttm_pool_nid(struct ttm_pool *pool)
+{
+       int nid = NUMA_NO_NODE;
+
+       if (pool)
+               nid = pool->nid;
+       if (nid == NUMA_NO_NODE)
+               nid = numa_node_id();
+       return nid;
+}
+
 /* Allocate pages of size 1 << order with the given gfp_flags */
 static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
                                        unsigned int order)
@@ -272,7 +284,7 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
 }
 
 /* Give pages into a specific pool_type */
-static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
+static void ttm_pool_type_give(struct ttm_pool_type *pt, int nid, struct page *p)
 {
        unsigned int i, num_pages = 1 << pt->order;
 
@@ -284,25 +296,46 @@ static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
        }
 
        spin_lock(&pt->lock);
-       list_add(&p->lru, &pt->pages);
+       INIT_LIST_HEAD(&p->lru);
+       rcu_read_lock();
+       list_lru_add(&pt->pages, &p->lru, nid, NULL);
+       rcu_read_unlock();
        spin_unlock(&pt->lock);
        atomic_long_add(1 << pt->order, &allocated_pages);
 }
 
+struct take_one_info {
+       struct ttm_pool_type *pt;
+       struct page *out;
+};
+
+static enum lru_status take_one_from_lru(struct list_head *item,
+                                        struct list_lru_one *list,
+                                        void *cb_arg)
+{
+       struct take_one_info *info = cb_arg;
+       struct ttm_pool_type *pt = info->pt;
+       struct page *p = container_of(item, struct page, lru);
+       list_lru_isolate(list, item);
+       atomic_long_sub(1 << pt->order, &allocated_pages);
+       info->out = p;
+       return LRU_REMOVED;
+}
+
 /* Take pages from a specific pool_type, return NULL when nothing available */
-static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
+static struct page *ttm_pool_type_take(struct ttm_pool_type *pt, int nid)
 {
-       struct page *p;
+       struct take_one_info info = {
+               .pt = pt,
+               .out = NULL,
+       };
+       unsigned long nr_to_walk = 1;
 
        spin_lock(&pt->lock);
-       p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
-       if (p) {
-               atomic_long_sub(1 << pt->order, &allocated_pages);
-               list_del(&p->lru);
-       }
+       list_lru_walk_node(&pt->pages, nid, take_one_from_lru, (void *)&info, &nr_to_walk);
        spin_unlock(&pt->lock);
 
-       return p;
+       return info.out;
 }
 
 /* Initialize and add a pool type to the global shrinker list */
@@ -313,24 +346,37 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
        pt->caching = caching;
        pt->order = order;
        spin_lock_init(&pt->lock);
-       INIT_LIST_HEAD(&pt->pages);
+       list_lru_init(&pt->pages);
 
        spin_lock(&shrinker_lock);
        list_add_tail(&pt->shrinker_list, &shrinker_list);
        spin_unlock(&shrinker_lock);
 }
 
+static enum lru_status pool_free_page(struct list_head *item,
+                                      struct list_lru_one *list,
+                                      void *cb_arg)
+{
+       struct ttm_pool_type *pt = cb_arg;
+       struct page *p = container_of(item, struct page, lru);
+
+       list_lru_isolate(list, item);
+
+       atomic_long_sub(1 << pt->order, &allocated_pages);
+       ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+       return LRU_REMOVED;
+}
+
 /* Remove a pool_type from the global shrinker list and free all pages */
 static void ttm_pool_type_fini(struct ttm_pool_type *pt)
 {
-       struct page *p;
-
        spin_lock(&shrinker_lock);
        list_del(&pt->shrinker_list);
        spin_unlock(&shrinker_lock);
 
-       while ((p = ttm_pool_type_take(pt)))
-               ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+       spin_lock(&pt->lock);
+       list_lru_walk(&pt->pages, pool_free_page, pt, LONG_MAX);
+       spin_unlock(&pt->lock);
 }
 
 /* Return the pool_type to use for the given caching and order */
@@ -380,7 +426,7 @@ static unsigned int ttm_pool_shrink(void)
        list_move_tail(&pt->shrinker_list, &shrinker_list);
        spin_unlock(&shrinker_lock);
 
-       p = ttm_pool_type_take(pt);
+       p = ttm_pool_type_take(pt, ttm_pool_nid(pt->pool));
        if (p) {
                ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
                num_pages = 1 << pt->order;
@@ -472,7 +518,7 @@ static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct page *page,
        }
 
        if (pt)
-               ttm_pool_type_give(pt, page);
+               ttm_pool_type_give(pt, page_to_nid(page), page);
        else
                ttm_pool_free_page(pool, caching, order, page);
 
@@ -734,7 +780,7 @@ static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
                p = NULL;
                pt = ttm_pool_select_type(pool, page_caching, order);
                if (pt && allow_pools)
-                       p = ttm_pool_type_take(pt);
+                       p = ttm_pool_type_take(pt, ttm_pool_nid(pool));
                /*
                 * If that fails or previously failed, allocate from system.
                 * Note that this also disallows additional pool allocations using
@@ -1164,12 +1210,10 @@ static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
 static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
 {
        unsigned int count = 0;
-       struct page *p;
 
        spin_lock(&pt->lock);
        /* Only used for debugfs, the overhead doesn't matter */
-       list_for_each_entry(p, &pt->pages, lru)
-               ++count;
+       count = list_lru_count(&pt->pages);
        spin_unlock(&pt->lock);
 
        return count;
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 54cd34a6e4c0..d1c574f2c58a 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -56,7 +56,7 @@ struct ttm_pool_type {
        struct list_head shrinker_list;
 
        spinlock_t lock;
-       struct list_head pages;
+       struct list_lru pages;
 };
 
 /**
-- 
2.49.0
