Manage the bio slab cache via an xarray, using the slab cache size as the xarray index and storing the 'struct bio_slab' instance in the xarray.

This simplifies the code a lot and also makes it more readable than before.
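For reference, the lookup-or-create flow under bio_slab_lock then boils down to
the following sketch (illustration only, not part of the patch; the helper name
bio_slab_get_locked is made up here, the real logic lives in
bio_find_or_create_slab() below):

	/* Illustrative sketch; must be called with bio_slab_lock held. */
	static struct bio_slab *bio_slab_get_locked(unsigned int size)
	{
		/* O(1) lookup keyed by the slab size */
		struct bio_slab *bslab = xa_load(&bio_slabs, size);

		if (bslab) {
			bslab->slab_ref++;	/* reuse the existing cache */
			return bslab;
		}

		bslab = create_bio_slab(size);	/* kzalloc() + kmem_cache_create() */
		if (bslab && xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL))) {
			/* insertion failed, undo the allocation */
			kmem_cache_destroy(bslab->slab);
			kfree(bslab);
			bslab = NULL;
		}
		return bslab;
	}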
Signed-off-by: Ming Lei <ming....@redhat.com>
---
 block/bio.c | 104 +++++++++++++++++++++------------------------------
 1 file changed, 42 insertions(+), 62 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index 1f2cc1fbe283..aa657cdd7c8c 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -19,6 +19,7 @@
 #include <linux/highmem.h>
 #include <linux/sched/sysctl.h>
 #include <linux/blk-crypto.h>
+#include <linux/xarray.h>
 
 #include <trace/events/block.h>
 #include "blk.h"
@@ -58,61 +59,49 @@ struct bio_slab {
 	char name[8];
 };
 static DEFINE_MUTEX(bio_slab_lock);
-static struct bio_slab *bio_slabs;
-static unsigned int bio_slab_nr, bio_slab_max;
+static DEFINE_XARRAY(bio_slabs);
+
+static struct bio_slab *create_bio_slab(unsigned int size)
+{
+	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
+	if (!bslab)
+		return NULL;
+
+	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
+	bslab->slab = kmem_cache_create(bslab->name, size,
+			ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
+	if (bslab->slab) {
+		bslab->slab_ref = 1;
+		bslab->slab_size = size;
+	} else {
+		kfree(bslab);
+		bslab = NULL;
+	}
+	return bslab;
+}
 
 static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
 {
 	unsigned int sz = sizeof(struct bio) + extra_size;
 	struct kmem_cache *slab = NULL;
-	struct bio_slab *bslab, *new_bio_slabs;
-	unsigned int new_bio_slab_max;
-	unsigned int i, entry = -1;
+	struct bio_slab *bslab;
 
 	mutex_lock(&bio_slab_lock);
-
-	i = 0;
-	while (i < bio_slab_nr) {
-		bslab = &bio_slabs[i];
-
-		if (!bslab->slab && entry == -1)
-			entry = i;
-		else if (bslab->slab_size == sz) {
+	bslab = xa_load(&bio_slabs, sz);
+	if (bslab) {
+		slab = bslab->slab;
+		bslab->slab_ref++;
+	} else {
+		bslab = create_bio_slab(sz);
+		if(bslab && !xa_err(xa_store(&bio_slabs, sz, bslab,
+						GFP_KERNEL)))
 			slab = bslab->slab;
-			bslab->slab_ref++;
-			break;
+		else {
+			if (bslab)
+				kmem_cache_destroy(bslab->slab);
+			kfree(bslab);
 		}
-		i++;
-	}
-
-	if (slab)
-		goto out_unlock;
-
-	if (bio_slab_nr == bio_slab_max && entry == -1) {
-		new_bio_slab_max = bio_slab_max << 1;
-		new_bio_slabs = krealloc(bio_slabs,
-				new_bio_slab_max * sizeof(struct bio_slab),
-				GFP_KERNEL);
-		if (!new_bio_slabs)
-			goto out_unlock;
-		bio_slab_max = new_bio_slab_max;
-		bio_slabs = new_bio_slabs;
 	}
-	if (entry == -1)
-		entry = bio_slab_nr++;
-
-	bslab = &bio_slabs[entry];
-
-	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
-	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
-				SLAB_HWCACHE_ALIGN, NULL);
-	if (!slab)
-		goto out_unlock;
-
-	bslab->slab = slab;
-	bslab->slab_ref = 1;
-	bslab->slab_size = sz;
-out_unlock:
 	mutex_unlock(&bio_slab_lock);
 	return slab;
 }
@@ -120,27 +109,26 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
 static void bio_put_slab(struct bio_set *bs)
 {
 	struct bio_slab *bslab = NULL;
-	unsigned int i;
+	unsigned int slab_size = bs->front_pad + sizeof(struct bio) +
+			BIO_INLINE_VECS * sizeof(struct bio_vec);
 
 	mutex_lock(&bio_slab_lock);
 
-	for (i = 0; i < bio_slab_nr; i++) {
-		if (bs->bio_slab == bio_slabs[i].slab) {
-			bslab = &bio_slabs[i];
-			break;
-		}
-	}
-
+	bslab = xa_load(&bio_slabs, slab_size);
 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
 		goto out;
 
+	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
+
 	WARN_ON(!bslab->slab_ref);
 
 	if (--bslab->slab_ref)
 		goto out;
 
+	xa_erase(&bio_slabs, slab_size);
+
 	kmem_cache_destroy(bslab->slab);
-	bslab->slab = NULL;
+	kfree(bslab);
 
 out:
 	mutex_unlock(&bio_slab_lock);
@@ -1651,16 +1639,8 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-	bio_slab_max = 2;
-	bio_slab_nr = 0;
-	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
-			GFP_KERNEL);
-
 	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
 
-	if (!bio_slabs)
-		panic("bio: can't allocate bios\n");
-
 	bio_integrity_init();
 	biovec_init_slabs();
 
-- 
2.28.0