On 4/3/25 10:31, Harry Yoo wrote:
>> +/*
>> + * Bulk free objects to the percpu sheaves.
>> + * Unlike free_to_pcs() this includes the calls to all necessary hooks
>> + * and the fallback to freeing to slab pages.
>> + */
>> +static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
>> +{
> 
> [...snip...]
> 
>> +next_batch:
>> +    if (!localtry_trylock(&s->cpu_sheaves->lock))
>> +            goto fallback;
>> +
>> +    pcs = this_cpu_ptr(s->cpu_sheaves);
>> +
>> +    if (unlikely(pcs->main->size == s->sheaf_capacity)) {
>> +
>> +            struct slab_sheaf *empty;
>> +
>> +            if (!pcs->spare) {
>> +                    empty = barn_get_empty_sheaf(pcs->barn);
>> +                    if (empty) {
>> +                            pcs->spare = pcs->main;
>> +                            pcs->main = empty;
>> +                            goto do_free;
>> +                    }
>> +                    goto no_empty;
> 
> Maybe a silly question, but if neither alloc_from_pcs_bulk() nor
> free_to_pcs_bulk() allocates empty sheaves (and they sometimes put empty or
> full sheaves in the barn), shouldn't you expect that sheaves are usually not
> in the barn when using the bulk interfaces?

Hm maybe, but with patch 5/8 it becomes cheap to check? And there might be
caches mixing both bulk and individual allocs?
But maybe I should at least add the GFP_NOWAIT attempt to allocate an empty
sheaf to the bulk free path? Can't recall if I omitted it intentionally or forgot.

>> -static void
>> -init_kmem_cache_node(struct kmem_cache_node *n)
>> +static bool
>> +init_kmem_cache_node(struct kmem_cache_node *n, struct node_barn *barn)
>>  {
> 
> Why is the return type bool, when it always succeeds?

I guess leftover from earlier versions. Will fix.

>> @@ -5421,20 +6295,27 @@ static int init_kmem_cache_nodes(struct kmem_cache 
>> *s)
>>  
>>      for_each_node_mask(node, slab_nodes) {
>>              struct kmem_cache_node *n;
>> +            struct node_barn *barn = NULL;
>>  
>>              if (slab_state == DOWN) {
>>                      early_kmem_cache_node_alloc(node);
>>                      continue;
>>              }
>> +
>> +            if (s->cpu_sheaves) {
>> +                    barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node);
>> +
>> +                    if (!barn)
>> +                            return 0;
>> +            }
>> +
>>              n = kmem_cache_alloc_node(kmem_cache_node,
>>                                              GFP_KERNEL, node);
>> -
>> -            if (!n) {
>> -                    free_kmem_cache_nodes(s);
>> +            if (!n)
>>                      return 0;
>> -            }
> 
> Looks like it's leaking the barn
> if the allocation of kmem_cache_node fails?

Oops right, will add kfree(barn) before return 0;

> 
>> -            init_kmem_cache_node(n);
>> +            init_kmem_cache_node(n, barn);
>> +
>>              s->node[node] = n;
>>      }
>>      return 1;
>> @@ -6005,12 +6891,24 @@ static int slab_mem_going_online_callback(void *arg)
>>       */
>>      mutex_lock(&slab_mutex);
>>      list_for_each_entry(s, &slab_caches, list) {
>> +            struct node_barn *barn = NULL;
>> +
>>              /*
>>               * The structure may already exist if the node was previously
>>               * onlined and offlined.
>>               */
>>              if (get_node(s, nid))
>>                      continue;
>> +
>> +            if (s->cpu_sheaves) {
>> +                    barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, nid);
>> +
>> +                    if (!barn) {
>> +                            ret = -ENOMEM;
>> +                            goto out;
>> +                    }
>> +            }
>> +
> 
> Ditto.
> 
> Otherwise looks good to me :)

Thanks a lot!


Reply via email to