On Fri, May 31, 2024 at 12:14:56PM -0700, Kees Cook wrote:

...

> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index b5c879fa66bc..f42a98d368a9 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -392,6 +392,82 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
>  }
>  EXPORT_SYMBOL(kmem_cache_create);
>  
> +static struct kmem_cache *kmem_buckets_cache __ro_after_init;
> +
> +kmem_buckets *kmem_buckets_create(const char *name, unsigned int align,
> +                               slab_flags_t flags,
> +                               unsigned int useroffset,
> +                               unsigned int usersize,
> +                               void (*ctor)(void *))
> +{
> +     kmem_buckets *b;
> +     int idx;
> +
> +     /*
> +      * When the separate buckets API is not built in, just return
> +      * a non-NULL value for the kmem_buckets pointer, which will be
> +      * unused when performing allocations.
> +      */
> +     if (!IS_ENABLED(CONFIG_SLAB_BUCKETS))
> +             return ZERO_SIZE_PTR;
> +
> +     if (WARN_ON(!kmem_buckets_cache))
> +             return NULL;
> +
> +     b = kmem_cache_alloc(kmem_buckets_cache, GFP_KERNEL|__GFP_ZERO);
> +     if (WARN_ON(!b))
> +             return NULL;
> +
> +     flags |= SLAB_NO_MERGE;
> +
> +     for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
> +             char *short_size, *cache_name;
> +             unsigned int cache_useroffset, cache_usersize;
> +             unsigned int size;
> +
> +             if (!kmalloc_caches[KMALLOC_NORMAL][idx])
> +                     continue;
> +
> +             size = kmalloc_caches[KMALLOC_NORMAL][idx]->object_size;
> +             if (!size)
> +                     continue;
> +
> +             short_size = strchr(kmalloc_caches[KMALLOC_NORMAL][idx]->name, '-');
> +             if (WARN_ON(!short_size))
> +                     goto fail;
> +
> +             cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
> +             if (WARN_ON(!cache_name))
> +                     goto fail;
> +
> +             if (useroffset >= size) {
> +                     cache_useroffset = 0;
> +                     cache_usersize = 0;
> +             } else {
> +                     cache_useroffset = useroffset;
> +                     cache_usersize = min(size - cache_useroffset, usersize);
> +             }
> +             (*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
> +                                     align, flags, cache_useroffset,
> +                                     cache_usersize, ctor);
> +             kfree(cache_name);
> +             if (WARN_ON(!(*b)[idx]))
> +                     goto fail;
> +     }
> +
> +     return b;
> +
> +fail:
> +     for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
> +             if ((*b)[idx])
> +                     kmem_cache_destroy((*b)[idx]);

nit: the NULL check here is unnecessary — kmem_cache_destroy() is a no-op when passed NULL, so you can call it unconditionally for each index.

> +     }
> +     kfree(b);
> +
> +     return NULL;
> +}
> +EXPORT_SYMBOL(kmem_buckets_create);

Reply via email to