On 03.02.2021 20:20, Yang Shi wrote:
> Now nr_deferred is available on a per-memcg level for memcg aware shrinkers,
> so we don't need to allocate shrinker->nr_deferred for such shrinkers anymore.
> 
> prealloc_memcg_shrinker() would return -ENOSYS if !CONFIG_MEMCG or memcg is
> disabled by the kernel command line, then the shrinker's SHRINKER_MEMCG_AWARE
> flag would be cleared.
> This makes the implementation of this patch simpler.
> 
> Acked-by: Vlastimil Babka <vba...@suse.cz>
> Signed-off-by: Yang Shi <shy828...@gmail.com>
> ---
>  mm/vmscan.c | 31 ++++++++++++++++---------------
>  1 file changed, 16 insertions(+), 15 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 545422d2aeec..20a35d26ae12 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -334,6 +334,9 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
>  {
>       int id, ret = -ENOMEM;
>  
> +     if (mem_cgroup_disabled())
> +             return -ENOSYS;
> +
>       down_write(&shrinker_rwsem);
>       /* This may call shrinker, so it must use down_read_trylock() */
>       id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
> @@ -414,7 +417,7 @@ static bool writeback_throttling_sane(struct scan_control *sc)
>  #else
>  static int prealloc_memcg_shrinker(struct shrinker *shrinker)
>  {
> -     return 0;
> +     return -ENOSYS;
>  }
>  
>  static void unregister_memcg_shrinker(struct shrinker *shrinker)
> @@ -525,8 +528,18 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
>   */
>  int prealloc_shrinker(struct shrinker *shrinker)
>  {
> -     unsigned int size = sizeof(*shrinker->nr_deferred);
> +     unsigned int size;
> +     int err;
> +
> +     if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
> +             err = prealloc_memcg_shrinker(shrinker);
> +             if (err != -ENOSYS)
> +                     return err;
>  
> +             shrinker->flags &= ~SHRINKER_MEMCG_AWARE;
> +     }
> +
> +     size = sizeof(*shrinker->nr_deferred);
>       if (shrinker->flags & SHRINKER_NUMA_AWARE)
>               size *= nr_node_ids;

This may sound surprising, but IIRC do_shrink_slab() may be called on early
boot *even before* root_mem_cgroup is allocated. AFAIR, I received a syzkaller
crash report because of this when I was implementing shrinker_maps.

This is a reason why we don't use shrinker_maps even when memcg is not
disabled: we iterate over every shrinker on shrinker_list instead. See the
check in shrink_slab():

        if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
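
For context, the surrounding dispatch in shrink_slab() looks roughly like
this (paraphrased from memory of the code around this series; only the
condition above is verbatim):

        if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
                /* A real (non-root) memcg: walk only its shrinker_map. */
                return shrink_slab_memcg(gfp_mask, nid, memcg, priority);

        /*
         * Root memcg (including early boot) or memcg disabled: fall
         * through and walk every shrinker on the global shrinker_list.
         */
        list_for_each_entry(shrinker, &shrinker_list, list) {
                struct shrink_control sc = {
                        .gfp_mask = gfp_mask,
                        .nid = nid,
                        .memcg = memcg,
                };

                freed += do_shrink_slab(&sc, shrinker, priority);
        }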

Possibly, we should do the same for nr_deferred: 1) always allocate
shrinker->nr_deferred, and 2) use shrinker->nr_deferred in count_nr_deferred()
and set_nr_deferred().
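
A minimal sketch of what that fallback could look like on the count side
(count_nr_deferred_memcg() is just an assumed name for the per-memcg helper
in this series, not a definitive implementation):

        static long count_nr_deferred(struct shrinker *shrinker,
                                      struct shrink_control *sc)
        {
                int nid = sc->nid;

                if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
                        nid = 0;

                /*
                 * Only trust the per-memcg counter for a real (non-root)
                 * memcg; on early boot, before root_mem_cgroup exists, and
                 * for the root memcg fall back to the always-allocated
                 * shrinker->nr_deferred.
                 */
                if ((shrinker->flags & SHRINKER_MEMCG_AWARE) && sc->memcg &&
                    !mem_cgroup_is_root(sc->memcg))
                        return count_nr_deferred_memcg(nid, shrinker,
                                                       sc->memcg);

                return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
        }

set_nr_deferred() would pick the counter with the same condition.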

>  
> @@ -534,26 +547,14 @@ int prealloc_shrinker(struct shrinker *shrinker)
>       if (!shrinker->nr_deferred)
>               return -ENOMEM;
>  
> -     if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
> -             if (prealloc_memcg_shrinker(shrinker))
> -                     goto free_deferred;
> -     }
>  
>       return 0;
> -
> -free_deferred:
> -     kfree(shrinker->nr_deferred);
> -     shrinker->nr_deferred = NULL;
> -     return -ENOMEM;
>  }
>  
>  void free_prealloced_shrinker(struct shrinker *shrinker)
>  {
> -     if (!shrinker->nr_deferred)
> -             return;
> -
>       if (shrinker->flags & SHRINKER_MEMCG_AWARE)
> -             unregister_memcg_shrinker(shrinker);
> +             return unregister_memcg_shrinker(shrinker);
>  
>       kfree(shrinker->nr_deferred);
>       shrinker->nr_deferred = NULL;
> 
