Add ability to allocate memory for memzones from named heaps. The
semantics are kept similar to regular allocations, and as much of
the code as possible is shared.

Signed-off-by: Anatoly Burakov <anatoly.bura...@intel.com>
---
 lib/librte_eal/common/eal_common_memzone.c  | 237 +++++++++++++++-----
 lib/librte_eal/common/include/rte_memzone.h | 183 +++++++++++++++
 lib/librte_eal/rte_eal_version.map          |   3 +
 3 files changed, 373 insertions(+), 50 deletions(-)

diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
index 25c56052c..d37e7ae1d 100644
--- a/lib/librte_eal/common/eal_common_memzone.c
+++ b/lib/librte_eal/common/eal_common_memzone.c
@@ -98,17 +98,14 @@ find_heap_max_free_elem(int *s, unsigned align)
        return len;
 }
 
-static const struct rte_memzone *
-memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
-               int socket_id, unsigned int flags, unsigned int align,
+static int
+common_checks(const char *name, size_t len, unsigned int align,
                unsigned int bound)
 {
        struct rte_memzone *mz;
        struct rte_mem_config *mcfg;
        struct rte_fbarray *arr;
        size_t requested_len;
-       int mz_idx;
-       bool contig;
 
        /* get pointer to global configuration */
        mcfg = rte_eal_get_configuration()->mem_config;
@@ -118,14 +115,14 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
        if (arr->count >= arr->len) {
                RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
                rte_errno = ENOSPC;
-               return NULL;
+               return -1;
        }
 
        if (strlen(name) > sizeof(mz->name) - 1) {
                RTE_LOG(DEBUG, EAL, "%s(): memzone <%s>: name too long\n",
                        __func__, name);
                rte_errno = ENAMETOOLONG;
-               return NULL;
+               return -1;
        }
 
        /* zone already exist */
@@ -133,7 +130,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
                RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
                        __func__, name);
                rte_errno = EEXIST;
-               return NULL;
+               return -1;
        }
 
        /* if alignment is not a power of two */
@@ -141,7 +138,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
                RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
                                align);
                rte_errno = EINVAL;
-               return NULL;
+               return -1;
        }
 
        /* alignment less than cache size is not allowed */
@@ -151,7 +148,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
        /* align length on cache boundary. Check for overflow before doing so */
        if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
                rte_errno = EINVAL; /* requested size too big */
-               return NULL;
+               return -1;
        }
 
        len += RTE_CACHE_LINE_MASK;
@@ -163,49 +160,23 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
        /* check that boundary condition is valid */
        if (bound != 0 && (requested_len > bound || !rte_is_power_of_2(bound))) {
                rte_errno = EINVAL;
-               return NULL;
-       }
-
-       if ((socket_id != SOCKET_ID_ANY) &&
-           (socket_id >= RTE_MAX_NUMA_NODES || socket_id < 0)) {
-               rte_errno = EINVAL;
-               return NULL;
-       }
-
-       if (!rte_eal_has_hugepages())
-               socket_id = SOCKET_ID_ANY;
-
-       contig = (flags & RTE_MEMZONE_IOVA_CONTIG) != 0;
-       /* malloc only cares about size flags, remove contig flag from flags */
-       flags &= ~RTE_MEMZONE_IOVA_CONTIG;
-
-       if (len == 0) {
-               /* len == 0 is only allowed for non-contiguous zones */
-               if (contig) {
-                       RTE_LOG(DEBUG, EAL, "Reserving zero-length contiguous memzones is not supported\n");
-                       rte_errno = EINVAL;
-                       return NULL;
-               }
-               if (bound != 0)
-                       requested_len = bound;
-               else {
-                       requested_len = find_heap_max_free_elem(&socket_id, align);
-                       if (requested_len == 0) {
-                               rte_errno = ENOMEM;
-                               return NULL;
-                       }
-               }
-       }
-
-       /* allocate memory on heap */
-       void *mz_addr = malloc_heap_alloc(NULL, requested_len, socket_id, flags,
-                       align, bound, contig);
-       if (mz_addr == NULL) {
-               rte_errno = ENOMEM;
-               return NULL;
+               return -1;
        }
+       return 0;
+}
 
+static const struct rte_memzone *
+create_memzone(const char *name, void *mz_addr, size_t requested_len)
+{
+       struct rte_mem_config *mcfg;
+       struct rte_fbarray *arr;
        struct malloc_elem *elem = malloc_elem_from_data(mz_addr);
+       struct rte_memzone *mz;
+       int mz_idx;
+
+       /* get pointer to global configuration */
+       mcfg = rte_eal_get_configuration()->mem_config;
+       arr = &mcfg->memzones;
 
        /* fill the zone in config */
        mz_idx = rte_fbarray_find_next_free(arr, 0);
@@ -236,6 +207,134 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
        return mz;
 }
 
+static const struct rte_memzone *
+memzone_reserve_from_heap_aligned_thread_unsafe(const char *name, size_t len,
+               const char *heap_name, unsigned int flags, unsigned int align,
+               unsigned int bound)
+{
+       size_t requested_len = len;
+       void *mz_addr;
+       int heap_idx;
+       bool contig;
+
+       /* this function sets rte_errno */
+       if (common_checks(name, len, align, bound) < 0)
+               return NULL;
+
+       heap_idx = malloc_heap_find_named_heap_idx(heap_name);
+       if (heap_idx < 0) {
+               rte_errno = ENOENT;
+               return NULL;
+       }
+
+       contig = (flags & RTE_MEMZONE_IOVA_CONTIG) != 0;
+       /* malloc only cares about size flags, remove contig flag from flags */
+       flags &= ~RTE_MEMZONE_IOVA_CONTIG;
+
+       if (len == 0) {
+               /* len == 0 is only allowed for non-contiguous zones */
+               if (contig) {
+                       RTE_LOG(DEBUG, EAL, "Reserving zero-length contiguous memzones is not supported\n");
+                       rte_errno = EINVAL;
+                       return NULL;
+               }
+               if (bound != 0)
+                       requested_len = bound;
+               else {
+                       requested_len = heap_max_free_elem(heap_idx, align);
+                       if (requested_len == 0) {
+                               rte_errno = ENOMEM;
+                               return NULL;
+                       }
+               }
+       }
+
+       /* allocate memory on heap */
+       mz_addr = malloc_heap_alloc_on_heap_id(NULL, requested_len, heap_idx,
+                       flags, align, bound, contig);
+       if (mz_addr == NULL) {
+               rte_errno = ENOMEM;
+               return NULL;
+       }
+       return create_memzone(name, mz_addr, requested_len);
+}
+
+static const struct rte_memzone *
+memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
+               int socket_id, unsigned int flags, unsigned int align,
+               unsigned int bound)
+{
+       size_t requested_len = len;
+       bool contig;
+       void *mz_addr;
+
+       /* this function sets rte_errno */
+       if (common_checks(name, len, align, bound) < 0)
+               return NULL;
+
+       if ((socket_id != SOCKET_ID_ANY) &&
+                       (socket_id >= RTE_MAX_NUMA_NODES || socket_id < 0)) {
+               rte_errno = EINVAL;
+               return NULL;
+       }
+
+       if (!rte_eal_has_hugepages())
+               socket_id = SOCKET_ID_ANY;
+
+       contig = (flags & RTE_MEMZONE_IOVA_CONTIG) != 0;
+       /* malloc only cares about size flags, remove contig flag from flags */
+       flags &= ~RTE_MEMZONE_IOVA_CONTIG;
+
+       if (len == 0) {
+               /* len == 0 is only allowed for non-contiguous zones */
+               if (contig) {
+                       RTE_LOG(DEBUG, EAL, "Reserving zero-length contiguous memzones is not supported\n");
+                       rte_errno = EINVAL;
+                       return NULL;
+               }
+               if (bound != 0)
+                       requested_len = bound;
+               else {
+                       requested_len = find_heap_max_free_elem(&socket_id,
+                                       align);
+                       if (requested_len == 0) {
+                               rte_errno = ENOMEM;
+                               return NULL;
+                       }
+               }
+       }
+
+       /* allocate memory on heap */
+       mz_addr = malloc_heap_alloc(NULL, requested_len, socket_id, flags,
+                       align, bound, contig);
+       if (mz_addr == NULL) {
+               rte_errno = ENOMEM;
+               return NULL;
+       }
+       return create_memzone(name, mz_addr, requested_len);
+}
+
+static const struct rte_memzone *
+rte_memzone_reserve_from_heap_thread_safe(const char *name, size_t len,
+               const char *heap_name, unsigned int flags, unsigned int align,
+               unsigned int bound)
+{
+       struct rte_mem_config *mcfg;
+       const struct rte_memzone *mz = NULL;
+
+       /* get pointer to global configuration */
+       mcfg = rte_eal_get_configuration()->mem_config;
+
+       rte_rwlock_write_lock(&mcfg->mlock);
+
+       mz = memzone_reserve_from_heap_aligned_thread_unsafe(name, len,
+                       heap_name, flags, align, bound);
+
+       rte_rwlock_write_unlock(&mcfg->mlock);
+
+       return mz;
+}
+
 static const struct rte_memzone *
 rte_memzone_reserve_thread_safe(const char *name, size_t len, int socket_id,
                unsigned int flags, unsigned int align, unsigned int bound)
@@ -293,6 +392,44 @@ rte_memzone_reserve(const char *name, size_t len, int socket_id,
                                               flags, RTE_CACHE_LINE_SIZE, 0);
 }
 
+/*
+ * Return a pointer to a correctly filled memzone descriptor (with a
+ * specified alignment and boundary). If the allocation cannot be done,
+ * return NULL.
+ */
+const struct rte_memzone *
+rte_memzone_reserve_from_heap_bounded(const char *name, size_t len,
+               const char *heap_name, unsigned int flags, unsigned int align,
+               unsigned int bound)
+{
+       return rte_memzone_reserve_from_heap_thread_safe(name, len, heap_name,
+                       flags, align, bound);
+}
+
+/*
+ * Return a pointer to a correctly filled memzone descriptor (with a
+ * specified alignment). If the allocation cannot be done, return NULL.
+ */
+const struct rte_memzone *
+rte_memzone_reserve_from_heap_aligned(const char *name, size_t len,
+               const char *heap_name, unsigned int flags, unsigned int align)
+{
+       return rte_memzone_reserve_from_heap_thread_safe(name, len, heap_name,
+                       flags, align, 0);
+}
+
+/*
+ * Return a pointer to a correctly filled memzone descriptor. If the
+ * allocation cannot be done, return NULL.
+ */
+const struct rte_memzone *
+rte_memzone_reserve_from_heap(const char *name, size_t len,
+               const char *heap_name, unsigned int flags)
+{
+       return rte_memzone_reserve_from_heap_thread_safe(name, len, heap_name,
+                       flags, RTE_CACHE_LINE_SIZE, 0);
+}
+
 int
 rte_memzone_free(const struct rte_memzone *mz)
 {
diff --git a/lib/librte_eal/common/include/rte_memzone.h b/lib/librte_eal/common/include/rte_memzone.h
index ef370fa6f..b27e5c421 100644
--- a/lib/librte_eal/common/include/rte_memzone.h
+++ b/lib/librte_eal/common/include/rte_memzone.h
@@ -258,6 +258,189 @@ const struct rte_memzone *rte_memzone_reserve_bounded(const char *name,
                        size_t len, int socket_id,
                        unsigned flags, unsigned align, unsigned bound);
 
+/**
+ * Reserve a portion of physical memory from a specified named heap.
+ *
+ * This function reserves some memory and returns a pointer to a
+ * correctly filled memzone descriptor. If the allocation cannot be
+ * done, return NULL.
+ *
+ * @note Reserving memzones with len set to 0 will only attempt to allocate
+ *   memzones from memory that is already available. It will not trigger any
+ *   new allocations.
+ *
+ * @note Reserving IOVA-contiguous memzones with len set to 0 is not currently
+ *   supported.
+ *
+ * @param name
+ *   The name of the memzone. If it already exists, the function will
+ *   fail and return NULL.
+ * @param len
+ *   The size of the memory to be reserved. If it
+ *   is 0, the biggest contiguous zone will be reserved.
+ * @param heap_name
+ *   The name of the heap to reserve memory from.
+ * @param flags
+ *   The flags parameter is used to request memzones to be
+ *   taken from specifically sized hugepages.
+ *   - RTE_MEMZONE_2MB - Reserved from 2MB pages
+ *   - RTE_MEMZONE_1GB - Reserved from 1GB pages
+ *   - RTE_MEMZONE_16MB - Reserved from 16MB pages
+ *   - RTE_MEMZONE_16GB - Reserved from 16GB pages
+ *   - RTE_MEMZONE_256KB - Reserved from 256KB pages
+ *   - RTE_MEMZONE_256MB - Reserved from 256MB pages
+ *   - RTE_MEMZONE_512MB - Reserved from 512MB pages
+ *   - RTE_MEMZONE_4GB - Reserved from 4GB pages
+ *   - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
+ *                                  the requested page size is unavailable.
+ *                                  If this flag is not set, the function
+ *                                  will return error on an unavailable size
+ *                                  request.
+ *   - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous.
+ *                               This option should be used when allocating
+ *                               memory intended for hardware rings etc.
+ * @return
+ *   A pointer to a correctly-filled read-only memzone descriptor, or NULL
+ *   on error.
+ *   On error case, rte_errno will be set appropriately:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ *    - EINVAL - invalid parameters
+ */
+__rte_experimental const struct rte_memzone *
+rte_memzone_reserve_from_heap(const char *name, size_t len,
+               const char *heap_name, unsigned int flags);
+
+/**
+ * Reserve a portion of physical memory from a specified named heap with
+ * alignment on a specified boundary.
+ *
+ * This function reserves some memory with alignment on a specified
+ * boundary, and returns a pointer to a correctly filled memzone
+ * descriptor. If the allocation cannot be done or if the alignment
+ * is not a power of 2, returns NULL.
+ *
+ * @note Reserving memzones with len set to 0 will only attempt to allocate
+ *   memzones from memory that is already available. It will not trigger any
+ *   new allocations.
+ *
+ * @note Reserving IOVA-contiguous memzones with len set to 0 is not currently
+ *   supported.
+ *
+ * @param name
+ *   The name of the memzone. If it already exists, the function will
+ *   fail and return NULL.
+ * @param len
+ *   The size of the memory to be reserved. If it
+ *   is 0, the biggest contiguous zone will be reserved.
+ * @param heap_name
+ *   The name of the heap to reserve memory from.
+ * @param flags
+ *   The flags parameter is used to request memzones to be
+ *   taken from specifically sized hugepages.
+ *   - RTE_MEMZONE_2MB - Reserved from 2MB pages
+ *   - RTE_MEMZONE_1GB - Reserved from 1GB pages
+ *   - RTE_MEMZONE_16MB - Reserved from 16MB pages
+ *   - RTE_MEMZONE_16GB - Reserved from 16GB pages
+ *   - RTE_MEMZONE_256KB - Reserved from 256KB pages
+ *   - RTE_MEMZONE_256MB - Reserved from 256MB pages
+ *   - RTE_MEMZONE_512MB - Reserved from 512MB pages
+ *   - RTE_MEMZONE_4GB - Reserved from 4GB pages
+ *   - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
+ *                                  the requested page size is unavailable.
+ *                                  If this flag is not set, the function
+ *                                  will return error on an unavailable size
+ *                                  request.
+ *   - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous.
+ *                               This option should be used when allocating
+ *                               memory intended for hardware rings etc.
+ * @param align
+ *   Alignment for resulting memzone. Must be a power of 2.
+ * @return
+ *   A pointer to a correctly-filled read-only memzone descriptor, or NULL
+ *   on error.
+ *   On error case, rte_errno will be set appropriately:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ *    - EINVAL - invalid parameters
+ */
+__rte_experimental const struct rte_memzone *
+rte_memzone_reserve_from_heap_aligned(const char *name, size_t len,
+               const char *heap_name, unsigned int flags, unsigned int align);
+
+/**
+ * Reserve a portion of physical memory from a specified named heap with
+ * specified alignment and boundary.
+ *
+ * This function reserves some memory with specified alignment and
+ * boundary, and returns a pointer to a correctly filled memzone
+ * descriptor. If the allocation cannot be done or if the alignment
+ * or boundary are not a power of 2, returns NULL.
+ * Memory buffer is reserved in a way that it wouldn't cross a specified
+ * boundary. That implies that requested length should be less than or
+ * equal to the boundary.
+ *
+ * @note Reserving memzones with len set to 0 will only attempt to allocate
+ *   memzones from memory that is already available. It will not trigger any
+ *   new allocations.
+ *
+ * @note Reserving IOVA-contiguous memzones with len set to 0 is not currently
+ *   supported.
+ *
+ * @param name
+ *   The name of the memzone. If it already exists, the function will
+ *   fail and return NULL.
+ * @param len
+ *   The size of the memory to be reserved. If it
+ *   is 0, the biggest contiguous zone will be reserved.
+ * @param heap_name
+ *   The name of the heap to reserve memory from.
+ * @param flags
+ *   The flags parameter is used to request memzones to be
+ *   taken from specifically sized hugepages.
+ *   - RTE_MEMZONE_2MB - Reserved from 2MB pages
+ *   - RTE_MEMZONE_1GB - Reserved from 1GB pages
+ *   - RTE_MEMZONE_16MB - Reserved from 16MB pages
+ *   - RTE_MEMZONE_16GB - Reserved from 16GB pages
+ *   - RTE_MEMZONE_256KB - Reserved from 256KB pages
+ *   - RTE_MEMZONE_256MB - Reserved from 256MB pages
+ *   - RTE_MEMZONE_512MB - Reserved from 512MB pages
+ *   - RTE_MEMZONE_4GB - Reserved from 4GB pages
+ *   - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
+ *                                  the requested page size is unavailable.
+ *                                  If this flag is not set, the function
+ *                                  will return error on an unavailable size
+ *                                  request.
+ *   - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous.
+ *                               This option should be used when allocating
+ *                               memory intended for hardware rings etc.
+ * @param align
+ *   Alignment for resulting memzone. Must be a power of 2.
+ * @param bound
+ *   Boundary for resulting memzone. Must be a power of 2 or zero.
+ *   Zero value implies no boundary condition.
+ * @return
+ *   A pointer to a correctly-filled read-only memzone descriptor, or NULL
+ *   on error.
+ *   On error case, rte_errno will be set appropriately:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ *    - EINVAL - invalid parameters
+ */
+__rte_experimental const struct rte_memzone *
+rte_memzone_reserve_from_heap_bounded(const char *name, size_t len,
+               const char *heap_name, unsigned int flags, unsigned int align,
+               unsigned int bound);
+
 /**
  * Free a memzone.
  *
diff --git a/lib/librte_eal/rte_eal_version.map b/lib/librte_eal/rte_eal_version.map
index cdde7eb3b..db1cfae6a 100644
--- a/lib/librte_eal/rte_eal_version.map
+++ b/lib/librte_eal/rte_eal_version.map
@@ -294,6 +294,9 @@ EXPERIMENTAL {
        rte_memseg_contig_walk;
        rte_memseg_list_walk;
        rte_memseg_walk;
+       rte_memzone_reserve_from_heap;
+       rte_memzone_reserve_from_heap_aligned;
+       rte_memzone_reserve_from_heap_bounded;
        rte_mp_action_register;
        rte_mp_action_unregister;
        rte_mp_reply;
-- 
2.17.1

Reply via email to