Plumb kmem_buckets arguments through kvmalloc_node_noprof() so it is
possible to provide an API to perform kvmalloc-style allocations with
a particular set of buckets. Introduce __kvmalloc_node() that takes a
kmem_buckets argument.
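
For illustration only (this snippet is a sketch and not part of the
patch): given a bucket set prepared elsewhere, for example with the
kmem_buckets_create() helper introduced earlier in this series, a
caller can request a kvmalloc-style allocation from that specific set:

    /*
     * Sketch: "foo_buckets" is a hypothetical bucket set assumed to
     * have been created earlier; "size" is the caller's request.
     */
    void *p;

    p = __kvmalloc_node(foo_buckets, size, GFP_KERNEL, NUMA_NO_NODE);
    if (!p)
            return -ENOMEM;
    ...
    kvfree(p);

Existing callers are unchanged: kvmalloc_node() now expands to
__kvmalloc_node(NULL, ...), passing a NULL bucket pointer.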

Signed-off-by: Kees Cook <keesc...@chromium.org>
---
Cc: Vlastimil Babka <vba...@suse.cz>
Cc: Christoph Lameter <c...@linux.com>
Cc: Pekka Enberg <penb...@kernel.org>
Cc: David Rientjes <rient...@google.com>
Cc: Joonsoo Kim <iamjoonsoo....@lge.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Roman Gushchin <roman.gushc...@linux.dev>
Cc: Hyeonggon Yoo <42.hye...@gmail.com>
Cc: linux...@kvack.org
---
 include/linux/slab.h | 10 ++++++----
 lib/rhashtable.c     |  2 +-
 mm/util.c            |  5 +++--
 3 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 07373b680894..23b13be0ac95 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -781,11 +781,13 @@ static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
 #define kzalloc(...)                           alloc_hooks(kzalloc_noprof(__VA_ARGS__))
 #define kzalloc_node(_size, _flags, _node)     kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
 
-extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_size(1);
-#define kvmalloc_node(...)                     alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))
+extern void *kvmalloc_node_noprof(kmem_buckets *b, size_t size, gfp_t flags, int node)
+                                       __alloc_size(2);
+#define __kvmalloc_node(...)                   alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))
+#define kvmalloc_node(...)                     __kvmalloc_node(NULL, __VA_ARGS__)
 
 #define kvmalloc(_size, _flags)                        kvmalloc_node(_size, _flags, NUMA_NO_NODE)
-#define kvmalloc_noprof(_size, _flags)         kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
+#define kvmalloc_noprof(_size, _flags)         kvmalloc_node_noprof(NULL, _size, _flags, NUMA_NO_NODE)
 #define kvzalloc(_size, _flags)                        kvmalloc(_size, _flags|__GFP_ZERO)
 
 #define kvzalloc_node(_size, _flags, _node)    kvmalloc_node(_size, _flags|__GFP_ZERO, _node)
@@ -797,7 +799,7 @@ static inline __alloc_size(1, 2) void *kvmalloc_array_noprof(size_t n, size_t si
        if (unlikely(check_mul_overflow(n, size, &bytes)))
                return NULL;
 
-       return kvmalloc_node_noprof(bytes, flags, NUMA_NO_NODE);
+       return kvmalloc_node_noprof(NULL, bytes, flags, NUMA_NO_NODE);
 }
 
 #define kvmalloc_array(...)                    alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index dbbed19f8fff..ef0f496e4aed 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -184,7 +184,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
        static struct lock_class_key __key;
 
        tbl = alloc_hooks_tag(ht->alloc_tag,
-                       kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
+                       kvmalloc_node_noprof(NULL, struct_size(tbl, buckets, nbuckets),
                                             gfp|__GFP_ZERO, NUMA_NO_NODE));
 
        size = nbuckets;
diff --git a/mm/util.c b/mm/util.c
index 80430e5ba981..bdec4954680a 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -596,6 +596,7 @@ EXPORT_SYMBOL(vm_mmap);
 /**
  * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
  * failure, fall back to non-contiguous (vmalloc) allocation.
+ * @b: which set of kmalloc buckets to allocate from.
  * @size: size of the request.
  * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
  * @node: numa node to allocate from
@@ -609,7 +610,7 @@ EXPORT_SYMBOL(vm_mmap);
  *
  * Return: pointer to the allocated memory of %NULL in case of failure
  */
-void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
+void *kvmalloc_node_noprof(kmem_buckets *b, size_t size, gfp_t flags, int node)
 {
        gfp_t kmalloc_flags = flags;
        void *ret;
@@ -631,7 +632,7 @@ void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
                kmalloc_flags &= ~__GFP_NOFAIL;
        }
 
-       ret = kmalloc_node_noprof(size, kmalloc_flags, node);
+       ret = __kmalloc_node_noprof(b, size, kmalloc_flags, node);
 
        /*
         * It doesn't really make sense to fallback to vmalloc for sub page
-- 
2.34.1

