From: "Liam R. Howlett" <liam.howl...@oracle.com>

Allocate a sheaf and fill it with the requested number of objects.  The
sheaf is deliberately not filled to its limit so that incorrect
allocation requests can be detected.

Signed-off-by: Liam R. Howlett <liam.howl...@oracle.com>
---
 tools/include/linux/slab.h   | 24 +++++++++++++
 tools/testing/shared/linux.c | 84 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 108 insertions(+)

diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index 
a475364cfd9fcdb10db252aab18ea3a620326b6b..0b6b42c9921fc402b4f3d4f681a95c9067d128db
 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -22,6 +22,13 @@ enum slab_state {
        FULL
 };
 
/*
 * Userspace stand-in for the kernel's slab sheaf: a fixed-capacity
 * array of objects pre-allocated from a single cache.
 */
struct slab_sheaf {
	struct kmem_cache *cache;	/* cache the objects were allocated from */
	unsigned int size;		/* number of objects currently held */
	unsigned int capacity;		/* slots available in objects[] */
	void *objects[];		/* flexible array of object pointers */
};
+
 struct kmem_cache_args {
        unsigned int align;
        unsigned int sheaf_capacity;
@@ -79,4 +86,21 @@ void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t 
size, void **list);
 int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
                          void **list);
 
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
+
+void *
+kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
+               struct slab_sheaf *sheaf);
+
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+               struct slab_sheaf *sheaf);
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+               struct slab_sheaf **sheafp, unsigned int size);
+
/* Return the number of objects currently stored in @sheaf. */
static inline unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
{
	return sheaf->size;
}
+
 #endif         /* _TOOLS_SLAB_H */
diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c
index 
9f5fd722f27f1d3877be8927be30409cd74ab3c3..a61c755c3c87e80036a5173115e955bfe7d5a80c
 100644
--- a/tools/testing/shared/linux.c
+++ b/tools/testing/shared/linux.c
@@ -181,6 +181,12 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t 
gfp, size_t size,
        if (kmalloc_verbose)
                pr_debug("Bulk alloc %lu\n", size);
 
+       if (cachep->exec_callback) {
+               if (cachep->callback)
+                       cachep->callback(cachep->private);
+               cachep->exec_callback = false;
+       }
+
        pthread_mutex_lock(&cachep->lock);
        if (cachep->nr_objs >= size) {
                struct radix_tree_node *node;
@@ -270,6 +276,84 @@ __kmem_cache_create_args(const char *name, unsigned int 
size,
        return ret;
 }
 
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
+{
+       struct slab_sheaf *sheaf;
+       unsigned int capacity;
+
+       if (size > s->sheaf_capacity)
+               capacity = size;
+       else
+               capacity = s->sheaf_capacity;
+
+       sheaf = malloc(sizeof(*sheaf) + sizeof(void *) * s->sheaf_capacity * 
capacity);
+       if (!sheaf) {
+               return NULL;
+       }
+
+       memset(sheaf, 0, size);
+       sheaf->cache = s;
+       sheaf->capacity = capacity;
+       sheaf->size = kmem_cache_alloc_bulk(s, gfp, size, sheaf->objects);
+       if (!sheaf->size) {
+               free(sheaf);
+               return NULL;
+       }
+
+       return sheaf;
+}
+
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+                struct slab_sheaf **sheafp, unsigned int size)
+{
+       struct slab_sheaf *sheaf = *sheafp;
+       int refill;
+
+       if (sheaf->size >= size)
+               return 0;
+
+       if (size > sheaf->capacity) {
+               sheaf = kmem_cache_prefill_sheaf(s, gfp, size);
+               if (!sheaf)
+                       return -ENOMEM;
+
+               kmem_cache_return_sheaf(s, gfp, *sheafp);
+               *sheafp = sheaf;
+               return 0;
+       }
+
+       refill = kmem_cache_alloc_bulk(s, gfp, size - sheaf->size,
+                                      &sheaf->objects[sheaf->size]);
+       if (!refill)
+               return -ENOMEM;
+
+       sheaf->size += refill;
+       return 0;
+}
+
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+                struct slab_sheaf *sheaf)
+{
+       if (sheaf->size) {
+               //s->non_kernel += sheaf->size;
+               kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]);
+       }
+       free(sheaf);
+}
+
+void *
+kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
+               struct slab_sheaf *sheaf)
+{
+       if (sheaf->size == 0) {
+               printf("Nothing left in sheaf!\n");
+               return NULL;
+       }
+
+       return sheaf->objects[--sheaf->size];
+}
+
 /*
  * Test the test infrastructure for kem_cache_alloc/free and bulk counterparts.
  */

-- 
2.48.1


Reply via email to