On Wed, Aug 18, 2021 at 01:27:13PM +0200, Christian König wrote:
> Just a gentle ping?
> 
> Does anybody have any objections? It's just switching back to using a
> spinlock in the hot path instead of a mutex.
> 
> Thanks,
> Christian.
> 
> On 22.07.21 13:34, Christian König wrote:
> > Switch back to using a spinlock again by moving the IOMMU unmap outside
> > of the locked region.
> > 
> > v2: Add a comment explaining why we need sync_shrinkers().
> > v3: Drop sync_shrinkers() and use an SRCU instead.

Why did you move to your own hand-rolled thing here? From the old thread
it just looked like Andrew wanted some proper explanation. And
synchronize_shrinkers() is imo much clearer than some hand-rolled SRCU
thing.
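
For reference, the v3 pattern boils down to roughly the following
(a minimal sketch; everything except the spinlock/SRCU primitives is
illustrative here, not the actual TTM code):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);
DEFINE_STATIC_SRCU(example_srcu);

struct example_pt {
	struct list_head link;
};

static void example_shrink_one(void)
{
	struct example_pt *pt;
	int idx;

	/* Read-side section: example_fini() below must wait for us. */
	idx = srcu_read_lock(&example_srcu);

	/* Only the list manipulation needs the spinlock. */
	spin_lock(&example_lock);
	if (list_empty(&example_list)) {
		spin_unlock(&example_lock);
		srcu_read_unlock(&example_srcu, idx);
		return;
	}
	pt = list_first_entry(&example_list, typeof(*pt), link);
	list_move_tail(&pt->link, &example_list);
	spin_unlock(&example_lock);

	/*
	 * The expensive work (freeing the page, IOMMU unmap) runs
	 * outside the spinlock; only the SRCU section still pins pt.
	 */

	srcu_read_unlock(&example_srcu, idx);
}

static void example_fini(struct example_pt *pt)
{
	spin_lock(&example_lock);
	list_del(&pt->link);
	spin_unlock(&example_lock);

	/* Wait for any shrinker that still holds a pointer to pt. */
	synchronize_srcu(&example_srcu);
}

The v2 approach was the same split, just without the
srcu_read_lock()/srcu_read_unlock() pair and with the synchronize_srcu()
in the fini path replaced by a single sync_shrinkers() call.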

Also, on the spinlock conversion: do you have some benchmarks/profile
flamegraphs/numbers that show it matters? It would be really good to
record that kind of stuff in the commit message instead of just implying
that this optimizes things.
-Daniel

> > 
> > Signed-off-by: Christian König <christian.koe...@amd.com>
> > ---
> >   drivers/gpu/drm/ttm/ttm_pool.c | 45 ++++++++++++++++++++--------------
> >   1 file changed, 27 insertions(+), 18 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
> > index cb38b1a17b09..cee664c487b5 100644
> > --- a/drivers/gpu/drm/ttm/ttm_pool.c
> > +++ b/drivers/gpu/drm/ttm/ttm_pool.c
> > @@ -70,7 +70,8 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
> >   static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
> >   static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
> > -static struct mutex shrinker_lock;
> > +static spinlock_t shrinker_lock;
> > +DEFINE_STATIC_SRCU(shrinker_srcu);
> >   static struct list_head shrinker_list;
> >   static struct shrinker mm_shrinker;
> > @@ -263,9 +264,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
> >     spin_lock_init(&pt->lock);
> >     INIT_LIST_HEAD(&pt->pages);
> > -   mutex_lock(&shrinker_lock);
> > +   spin_lock(&shrinker_lock);
> >     list_add_tail(&pt->shrinker_list, &shrinker_list);
> > -   mutex_unlock(&shrinker_lock);
> > +   spin_unlock(&shrinker_lock);
> >   }
> >   /* Remove a pool_type from the global shrinker list and free all pages */
> > @@ -273,9 +274,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
> >   {
> >     struct page *p;
> > -   mutex_lock(&shrinker_lock);
> > +   spin_lock(&shrinker_lock);
> >     list_del(&pt->shrinker_list);
> > -   mutex_unlock(&shrinker_lock);
> > +   spin_unlock(&shrinker_lock);
> >     while ((p = ttm_pool_type_take(pt)))
> >             ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
> > @@ -313,24 +314,27 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
> >   static unsigned int ttm_pool_shrink(void)
> >   {
> >     struct ttm_pool_type *pt;
> > -   unsigned int num_freed;
> > +   unsigned int num_pages;
> >     struct page *p;
> > +   int idx;
> > -   mutex_lock(&shrinker_lock);
> > +   idx = srcu_read_lock(&shrinker_srcu);
> > +
> > +   spin_lock(&shrinker_lock);
> >     pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
> > +   list_move_tail(&pt->shrinker_list, &shrinker_list);
> > +   spin_unlock(&shrinker_lock);
> >     p = ttm_pool_type_take(pt);
> >     if (p) {
> >             ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
> > -           num_freed = 1 << pt->order;
> > +           num_pages = 1 << pt->order;
> >     } else {
> > -           num_freed = 0;
> > +           num_pages = 0;
> >     }
> > -   list_move_tail(&pt->shrinker_list, &shrinker_list);
> > -   mutex_unlock(&shrinker_lock);
> > -
> > -   return num_freed;
> > +   srcu_read_unlock(&shrinker_srcu, idx);
> > +   return num_pages;
> >   }
> >   /* Return the allocation order based for a page */
> > @@ -530,6 +534,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
> >                     for (j = 0; j < MAX_ORDER; ++j)
> >                             ttm_pool_type_fini(&pool->caching[i].orders[j]);
> >     }
> > +
> > +   /* We removed the pool types from the LRU, but we need to also make sure
> > +    * that no shrinker is concurrently freeing pages from the pool.
> > +    */
> > +   synchronize_srcu(&shrinker_srcu);
> >   }
> >   /* As long as pages are available make sure to release at least one */
> > @@ -604,7 +613,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
> >   {
> >     ttm_pool_debugfs_header(m);
> > -   mutex_lock(&shrinker_lock);
> > +   spin_lock(&shrinker_lock);
> >     seq_puts(m, "wc\t:");
> >     ttm_pool_debugfs_orders(global_write_combined, m);
> >     seq_puts(m, "uc\t:");
> > @@ -613,7 +622,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
> >     ttm_pool_debugfs_orders(global_dma32_write_combined, m);
> >     seq_puts(m, "uc 32\t:");
> >     ttm_pool_debugfs_orders(global_dma32_uncached, m);
> > -   mutex_unlock(&shrinker_lock);
> > +   spin_unlock(&shrinker_lock);
> >     ttm_pool_debugfs_footer(m);
> > @@ -640,7 +649,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
> >     ttm_pool_debugfs_header(m);
> > -   mutex_lock(&shrinker_lock);
> > +   spin_lock(&shrinker_lock);
> >     for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
> >             seq_puts(m, "DMA ");
> >             switch (i) {
> > @@ -656,7 +665,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
> >             }
> >             ttm_pool_debugfs_orders(pool->caching[i].orders, m);
> >     }
> > -   mutex_unlock(&shrinker_lock);
> > +   spin_unlock(&shrinker_lock);
> >     ttm_pool_debugfs_footer(m);
> >     return 0;
> > @@ -693,7 +702,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
> >     if (!page_pool_size)
> >             page_pool_size = num_pages;
> > -   mutex_init(&shrinker_lock);
> > +   spin_lock_init(&shrinker_lock);
> >     INIT_LIST_HEAD(&shrinker_list);
> >     for (i = 0; i < MAX_ORDER; ++i) {
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch