On Fri, 25 Apr 2025, Vlastimil Babka wrote:

> @@ -5924,8 +5948,15 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
>  	if (unlikely(!slab_free_hook(s, object, slab_want_init_on_free(s), false)))
>  		return;
>  
> -	if (!s->cpu_sheaves || !free_to_pcs(s, object))
> -		do_slab_free(s, slab, object, object, 1, addr);
> +	if (s->cpu_sheaves) {
> +		if (likely(!IS_ENABLED(CONFIG_NUMA) ||
> +			   slab_nid(slab) == numa_node_id())) {
Ah, ok, this removes remote object freeing to the pcs. Note that
numa_mem_id() is needed to support memoryless NUMA nodes (see the
untested sketch after the quoted hunk below).

> +			free_to_pcs(s, object);
> +			return;
> +		}
> +	}
> +
> +	do_slab_free(s, slab, object, object, 1, addr);
>  }
>  
>  #ifdef CONFIG_MEMCG
> 
> 
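For illustration only, an untested sketch of that check using
numa_mem_id() instead of numa_node_id(), assuming the same slab_free()
context as in the quoted hunk (all surrounding names are taken from the
posted patch, not from a final implementation):

	if (s->cpu_sheaves) {
		/*
		 * numa_mem_id() resolves to the nearest node that actually
		 * has memory, so a CPU sitting on a memoryless node can
		 * still take the fast sheaf path for objects coming from
		 * its local memory node instead of always falling back to
		 * do_slab_free().
		 */
		if (likely(!IS_ENABLED(CONFIG_NUMA) ||
			   slab_nid(slab) == numa_mem_id())) {
			free_to_pcs(s, object);
			return;
		}
	}

	do_slab_free(s, slab, object, object, 1, addr);

The only change from the quoted hunk is numa_node_id() -> numa_mem_id();
everything else is kept as posted.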