The branch main has been updated by bz:

URL: https://cgit.FreeBSD.org/src/commit/?id=1c81ebec74d8a71c770f7835d3bc6e02c13467a0

commit 1c81ebec74d8a71c770f7835d3bc6e02c13467a0
Author:     Bjoern A. Zeeb <b...@freebsd.org>
AuthorDate: 2025-03-20 23:54:12 +0000
Commit:     Bjoern A. Zeeb <b...@freebsd.org>
CommitDate: 2025-04-12 20:49:19 +0000

    LinuxKPI: switch mallocarray to an lkpi implementation using __kmalloc()
    
    With mallocarray() we cannot guarantee that any size larger than
    PAGE_SIZE will be contiguous.  Switch kmalloc_array() and
    kmalloc_array_node() to use __kmalloc()/lkpi___kmalloc_node() as their
    underlying implementation, which now does provide that guarantee.
    Likewise adjust kcalloc_node() to use kmalloc_array_node().
    This means we only have two (plain + _node) underlying allocation
    routines for the entire category of functions.
    
    Also adjust kvmalloc() and kvmalloc_array() to be "mirrored"
    implementations of their non-v counterparts.  These may return
    non-contiguous memory and so can use malloc().
    
    Sponsored by:   The FreeBSD Foundation
    MFC after:      3 days
    Reviewed by:    jhb
    Extra thanks to: jhb for helping sort this out
    Differential Revision: https://reviews.freebsd.org/D46657
---
 sys/compat/linuxkpi/common/include/linux/slab.h | 48 +++++++++++++++----------
 sys/compat/linuxkpi/common/src/linux_compat.c   | 12 +++----
 sys/compat/linuxkpi/common/src/linux_slab.c     | 12 +++++++
 3 files changed, 48 insertions(+), 24 deletions(-)
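
As an aside, the practical effect is easiest to see from a consumer's point of
view; below is a minimal, hypothetical driver-style sketch (the descriptor
struct and function names are invented for illustration, not part of this
change):

/* Hypothetical LinuxKPI consumer. */
#include <linux/slab.h>

struct my_desc {			/* hypothetical DMA descriptor */
	uint64_t	addr;
	uint32_t	len;
	uint32_t	flags;
};

static struct my_desc *
alloc_desc_ring(size_t count)
{
	/*
	 * Per this change kmalloc_array() is backed by __kmalloc(), so
	 * even requests larger than PAGE_SIZE come back physically
	 * contiguous, as Linux driver code handing a ring to DMA
	 * hardware would expect.
	 */
	return (kmalloc_array(count, sizeof(struct my_desc),
	    GFP_KERNEL | __GFP_ZERO));
}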

diff --git a/sys/compat/linuxkpi/common/include/linux/slab.h b/sys/compat/linuxkpi/common/include/linux/slab.h
index 07c16884b00e..ddaabcef0b9c 100644
--- a/sys/compat/linuxkpi/common/include/linux/slab.h
+++ b/sys/compat/linuxkpi/common/include/linux/slab.h
@@ -42,7 +42,6 @@
 MALLOC_DECLARE(M_KMALLOC);
 
 #define        kmalloc(size, flags)            lkpi_kmalloc(size, flags)
-#define        kvmalloc(size, flags)           kmalloc(size, flags)
 #define        kvzalloc(size, flags)           kmalloc(size, (flags) | __GFP_ZERO)
 #define        kvcalloc(n, size, flags)        kvmalloc_array(n, size, (flags) | __GFP_ZERO)
 #define        kzalloc(size, flags)            kmalloc(size, (flags) | __GFP_ZERO)
@@ -93,6 +92,7 @@ struct linux_kmem_cache;
 
 extern void *lkpi_kmalloc(size_t size, gfp_t flags);
 void *lkpi___kmalloc(size_t size, gfp_t flags);
+void *lkpi___kmalloc_node(size_t size, gfp_t flags, int node);
 #define        __kmalloc(_s, _f)       lkpi___kmalloc(_s, _f)
 
 static inline gfp_t
@@ -113,23 +113,39 @@ linux_check_m_flags(gfp_t flags)
 static inline void *
 kmalloc_node(size_t size, gfp_t flags, int node)
 {
-       return (malloc_domainset(size, M_KMALLOC,
-           linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
+       return (lkpi___kmalloc_node(size, flags, node));
+}
+
+static inline void *
+kmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+       if (WOULD_OVERFLOW(n, size))
+               panic("%s: %zu * %zu overflowed", __func__, n, size);
+
+       return (kmalloc(size * n, flags));
 }
 
 static inline void *
 kcalloc(size_t n, size_t size, gfp_t flags)
 {
        flags |= __GFP_ZERO;
-       return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
+       return (kmalloc_array(n, size, linux_check_m_flags(flags)));
+}
+
+static inline void *
+kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
+{
+       if (WOULD_OVERFLOW(n, size))
+               panic("%s: %zu * %zu overflowed", __func__, n, size);
+
+       return (kmalloc_node(size * n, flags, node));
 }
 
 static inline void *
 kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
 {
        flags |= __GFP_ZERO;
-       return (mallocarray_domainset(n, size, M_KMALLOC,
-           linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
+       return (kmalloc_array_node(n, size, flags, node));
 }
 
 static inline void *
@@ -151,23 +167,20 @@ vmalloc_32(size_t size)
        return (contigmalloc(size, M_KMALLOC, M_WAITOK, 0, UINT_MAX, 1, 1));
 }
 
+/* May return non-contiguous memory. */
 static inline void *
-kmalloc_array(size_t n, size_t size, gfp_t flags)
-{
-       return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
-}
-
-static inline void *
-kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
+kvmalloc(size_t size, gfp_t flags)
 {
-       return (mallocarray_domainset(n, size, M_KMALLOC,
-           linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
+       return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
 }
 
 static inline void *
 kvmalloc_array(size_t n, size_t size, gfp_t flags)
 {
-       return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
+       if (WOULD_OVERFLOW(n, size))
+               panic("%s: %zu * %zu overflowed", __func__, n, size);
+
+       return (kvmalloc(size * n, flags));
 }
 
 static inline void *
@@ -179,9 +192,8 @@ krealloc(void *ptr, size_t size, gfp_t flags)
 static inline void *
 krealloc_array(void *ptr, size_t n, size_t size, gfp_t flags)
 {
-       if (WOULD_OVERFLOW(n, size)) {
+       if (WOULD_OVERFLOW(n, size))
                return NULL;
-       }
 
        return (realloc(ptr, n * size, M_KMALLOC, linux_check_m_flags(flags)));
 }
diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c
index 217df0aec91f..30acd1b54e9c 100644
--- a/sys/compat/linuxkpi/common/src/linux_compat.c
+++ b/sys/compat/linuxkpi/common/src/linux_compat.c
@@ -2847,8 +2847,8 @@ linux_compat_init(void *arg)
        boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id);
        boot_cpu_data.x86_vendor = x86_vendor;
 
-       __cpu_data = mallocarray(mp_maxid + 1,
-           sizeof(*__cpu_data), M_KMALLOC, M_WAITOK | M_ZERO);
+       __cpu_data = kmalloc_array(mp_maxid + 1,
+           sizeof(*__cpu_data), M_WAITOK | M_ZERO);
        CPU_FOREACH(i) {
                __cpu_data[i].x86_clflush_size = cpu_clflush_line_size;
                __cpu_data[i].x86_max_cores = mp_ncpus;
@@ -2890,8 +2890,8 @@ linux_compat_init(void *arg)
         * This is used by cpumask_of() (and possibly others in the future) for,
         * e.g., drivers to pass hints to irq_set_affinity_hint().
         */
-       static_single_cpu_mask = mallocarray(mp_maxid + 1,
-           sizeof(static_single_cpu_mask), M_KMALLOC, M_WAITOK | M_ZERO);
+       static_single_cpu_mask = kmalloc_array(mp_maxid + 1,
+           sizeof(static_single_cpu_mask), M_WAITOK | M_ZERO);
 
        /*
         * When the number of CPUs reach a threshold, we start to save memory
@@ -2910,9 +2910,9 @@ linux_compat_init(void *arg)
                 * (_BITSET_BITS / 8)' bytes (for comparison with the
                 * overlapping scheme).
                 */
-               static_single_cpu_mask_lcs = mallocarray(mp_ncpus,
+               static_single_cpu_mask_lcs = kmalloc_array(mp_ncpus,
                    sizeof(*static_single_cpu_mask_lcs),
-                   M_KMALLOC, M_WAITOK | M_ZERO);
+                   M_WAITOK | M_ZERO);
 
                sscm_ptr = static_single_cpu_mask_lcs;
                CPU_FOREACH(i) {
diff --git a/sys/compat/linuxkpi/common/src/linux_slab.c b/sys/compat/linuxkpi/common/src/linux_slab.c
index bc780ab3f609..4b0b18178a5c 100644
--- a/sys/compat/linuxkpi/common/src/linux_slab.c
+++ b/sys/compat/linuxkpi/common/src/linux_slab.c
@@ -207,6 +207,18 @@ linux_kmem_cache_destroy(struct linux_kmem_cache *c)
        free(c, M_KMALLOC);
 }
 
+void *
+lkpi___kmalloc_node(size_t size, gfp_t flags, int node)
+{
+       if (size <= PAGE_SIZE)
+               return (malloc_domainset(size, M_KMALLOC,
+                   linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
+       else
+               return (contigmalloc_domainset(size, M_KMALLOC,
+                   linux_get_vm_domain_set(node), linux_check_m_flags(flags),
+                   0, -1UL, PAGE_SIZE, 0));
+}
+
 void *
 lkpi___kmalloc(size_t size, gfp_t flags)
 {

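For completeness, the mirrored kv*() side may hand back memory that is not
physically contiguous and is therefore only suitable for data that is touched
by the CPU alone; again a hypothetical sketch, not code from the tree:

/* Hypothetical CPU-only consumer of the kv*() allocators. */
#include <linux/slab.h>

static uint32_t *
alloc_lookup_table(size_t nentries)
{
	/*
	 * kvmalloc_array() now mirrors kmalloc_array() (same overflow
	 * check) but is backed by plain malloc(9), so anything above
	 * PAGE_SIZE may be non-contiguous; that is fine for a table
	 * only the CPU reads.  Free with kvfree().
	 */
	return (kvmalloc_array(nentries, sizeof(uint32_t),
	    GFP_KERNEL | __GFP_ZERO));
}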