The size of the memory chunk required to populate mempool objects
depends on how the objects are stored in memory. Different mempool
drivers may have different requirements, so add a new operation that
calculates the memory size in accordance with driver requirements and
advertises the minimum memory chunk size and alignment in a generic
way.
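
For illustration, a driver with stricter requirements could implement
the callback roughly as follows (a sketch only: the function name and
the contiguity/alignment constraints shown are hypothetical, not part
of this patch):

    #include <rte_mempool.h> /* rte_mempool_calc_mem_size_def() etc. */

    static ssize_t
    example_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
                          uint32_t pg_shift, size_t *min_chunk_size,
                          size_t *align)
    {
            /* Start from the default size estimation... */
            ssize_t mem_size = rte_mempool_calc_mem_size_def(mp, obj_num,
                            pg_shift, min_chunk_size, align);

            if (mem_size < 0)
                    return mem_size;

            /* ...but require the whole pool to fit into one physically
             * contiguous chunk, aligned on a cache line boundary.
             */
            *min_chunk_size = mem_size;
            *align = RTE_CACHE_LINE_SIZE;
            return mem_size;
    }

The driver then sets .calc_mem_size = example_calc_mem_size in its
struct rte_mempool_ops; drivers that leave the callback NULL get the
default behaviour via rte_mempool_calc_mem_size_def().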

Suggested-by: Olivier Matz <olivier.m...@6wind.com>
Signed-off-by: Andrew Rybchenko <arybche...@solarflare.com>
---
 lib/librte_mempool/rte_mempool.c           | 95 ++++++++++++++++++++++--------
 lib/librte_mempool/rte_mempool.h           | 63 +++++++++++++++++++-
 lib/librte_mempool/rte_mempool_ops.c       | 18 ++++++
 lib/librte_mempool/rte_mempool_version.map |  8 +++
 4 files changed, 159 insertions(+), 25 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index e783b9a..1f54f95 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -233,13 +233,14 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
        return sz->total_size;
 }
 
-
 /*
- * Calculate maximum amount of memory required to store given number of objects.
+ * Internal function to calculate the required memory chunk size, shared
+ * by the default implementation of the corresponding callback and by
+ * the deprecated external function.
  */
-size_t
-rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
-                     unsigned int flags)
+static size_t
+rte_mempool_xmem_size_int(uint32_t elt_num, size_t total_elt_sz,
+                         uint32_t pg_shift, unsigned int flags)
 {
        size_t obj_per_page, pg_num, pg_sz;
        unsigned int mask;
@@ -264,6 +265,49 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
        return pg_num << pg_shift;
 }
 
+ssize_t
+rte_mempool_calc_mem_size_def(const struct rte_mempool *mp,
+                             uint32_t obj_num, uint32_t pg_shift,
+                             size_t *min_chunk_size,
+                             __rte_unused size_t *align)
+{
+       unsigned int mp_flags;
+       int ret;
+       size_t total_elt_sz;
+       size_t mem_size;
+
+       /* Get mempool capabilities */
+       mp_flags = 0;
+       ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
+       if ((ret < 0) && (ret != -ENOTSUP))
+               return ret;
+
+       total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+       mem_size = rte_mempool_xmem_size_int(obj_num, total_elt_sz, pg_shift,
+                                            mp->flags | mp_flags);
+
+       if (mp_flags & MEMPOOL_F_CAPA_PHYS_CONTIG)
+               *min_chunk_size = mem_size;
+       else
+               *min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);
+
+       /* No extra alignment requirements by default */
+
+       return mem_size;
+}
+
+/*
+ * Calculate maximum amount of memory required to store given number of objects.
+ */
+size_t
+rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
+                     unsigned int flags)
+{
+       return rte_mempool_xmem_size_int(elt_num, total_elt_sz, pg_shift,
+                                        flags);
+}
+
 /*
  * Calculate how much memory would be actually required with the
  * given memory footprint to store required number of elements.
@@ -570,25 +614,16 @@ rte_mempool_populate_default(struct rte_mempool *mp)
        unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
-       size_t size, total_elt_sz, align, pg_sz, pg_shift;
+       ssize_t mem_size;
+       size_t align, pg_sz, pg_shift;
        rte_iova_t iova;
        unsigned mz_id, n;
-       unsigned int mp_flags;
        int ret;
 
        /* mempool must not be populated */
        if (mp->nb_mem_chunks != 0)
                return -EEXIST;
 
-       /* Get mempool capabilities */
-       mp_flags = 0;
-       ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
-       if ((ret < 0) && (ret != -ENOTSUP))
-               return ret;
-
-       /* update mempool capabilities */
-       mp->flags |= mp_flags;
-
        if (rte_eal_has_hugepages()) {
                pg_shift = 0; /* not needed, zone is physically contiguous */
                pg_sz = 0;
@@ -599,10 +634,15 @@ rte_mempool_populate_default(struct rte_mempool *mp)
                align = pg_sz;
        }
 
-       total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
        for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
-               size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift,
-                                               mp->flags);
+               size_t min_chunk_size;
+
+               mem_size = rte_mempool_ops_calc_mem_size(mp, n, pg_shift,
+                               &min_chunk_size, &align);
+               if (mem_size < 0) {
+                       ret = mem_size;
+                       goto fail;
+               }
 
                ret = snprintf(mz_name, sizeof(mz_name),
                        RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
@@ -611,7 +651,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
                        goto fail;
                }
 
-               mz = rte_memzone_reserve_aligned(mz_name, size,
+               mz = rte_memzone_reserve_aligned(mz_name, mem_size,
                        mp->socket_id, mz_flags, align);
                /* not enough memory, retry with the biggest zone we have */
                if (mz == NULL)
@@ -622,6 +662,12 @@ rte_mempool_populate_default(struct rte_mempool *mp)
                        goto fail;
                }
 
+               if (mz->len < min_chunk_size) {
+                       rte_memzone_free(mz);
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+
                if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
                        iova = RTE_BAD_IOVA;
                else
@@ -654,13 +700,14 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 static size_t
 get_anon_size(const struct rte_mempool *mp)
 {
-       size_t size, total_elt_sz, pg_sz, pg_shift;
+       size_t size, pg_sz, pg_shift;
+       size_t min_chunk_size;
+       size_t align;
 
        pg_sz = getpagesize();
        pg_shift = rte_bsf32(pg_sz);
-       total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
-       size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift,
-                                       mp->flags);
+       size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
+                                            &min_chunk_size, &align);
 
        return size;
 }
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index e21026a..be8a371 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -428,6 +428,39 @@ typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
 typedef int (*rte_mempool_ops_register_memory_area_t)
 (const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);
 
+/**
+ * Calculate memory size required to store specified number of objects.
+ *
+ * Note that if object size is bigger than page size, then it assumes
+ * that pages are grouped in subsets of physically contiguous pages big
+ * enough to store at least one object.
+ *
+ * @param mp
+ *   Pointer to the memory pool.
+ * @param obj_num
+ *   Number of objects.
+ * @param pg_shift
+ *   LOG2 of the physical page size. If set to 0, ignore page boundaries.
+ * @param min_chunk_size
+ *   Location for minimum size of the memory chunk which may be used to
+ *   store memory pool objects.
+ * @param align
+ *   Location for required memory chunk alignment.
+ * @return
+ *   Required memory size aligned at page boundary.
+ */
+typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
+               uint32_t obj_num,  uint32_t pg_shift,
+               size_t *min_chunk_size, size_t *align);
+
+/**
+ * Default way to calculate memory size required to store specified
+ * number of objects.
+ */
+ssize_t rte_mempool_calc_mem_size_def(const struct rte_mempool *mp,
+                                     uint32_t obj_num, uint32_t pg_shift,
+                                     size_t *min_chunk_size, size_t *align);
+
 /** Structure defining mempool operations structure */
 struct rte_mempool_ops {
        char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
@@ -444,6 +477,11 @@ struct rte_mempool_ops {
         * Notify new memory area to mempool
         */
        rte_mempool_ops_register_memory_area_t register_memory_area;
+       /**
+        * Optional callback to calculate memory size required to
+        * store specified number of objects.
+        */
+       rte_mempool_calc_mem_size_t calc_mem_size;
 } __rte_cache_aligned;
 
 #define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */
@@ -593,6 +631,29 @@ rte_mempool_ops_register_memory_area(const struct rte_mempool *mp,
                                char *vaddr, rte_iova_t iova, size_t len);
 
 /**
+ * @internal wrapper for mempool_ops calc_mem_size callback.
+ * API to calculate size of memory required to store specified number of
+ * objects.
+ *
+ * @param mp
+ *   Pointer to the memory pool.
+ * @param obj_num
+ *   Number of objects.
+ * @param pg_shift
+ *   LOG2 of the physical page size. If set to 0, ignore page boundaries.
+ * @param min_chunk_size
+ *   Location for minimum size of the memory chunk which may be used to
+ *   store memory pool objects.
+ * @param align
+ *   Location for required memory chunk alignment.
+ * @return
+ *   Required memory size aligned at page boundary.
+ */
+ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+                                     uint32_t obj_num, uint32_t pg_shift,
+                                     size_t *min_chunk_size, size_t *align);
+
+/**
  * @internal wrapper for mempool_ops free callback.
  *
  * @param mp
@@ -1562,7 +1623,7 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
  * of objects. Assume that the memory buffer will be aligned at page
  * boundary.
  *
- * Note that if object size is bigger then page size, then it assumes
+ * Note that if object size is bigger than page size, then it assumes
  * that pages are grouped in subsets of physically continuous pages big
  * enough to store at least one object.
  *
diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
index 92b9f90..d048b37 100644
--- a/lib/librte_mempool/rte_mempool_ops.c
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -88,6 +88,7 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h)
        ops->get_count = h->get_count;
        ops->get_capabilities = h->get_capabilities;
        ops->register_memory_area = h->register_memory_area;
+       ops->calc_mem_size = h->calc_mem_size;
 
        rte_spinlock_unlock(&rte_mempool_ops_table.sl);
 
@@ -152,6 +153,23 @@ rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
        return ops->register_memory_area(mp, vaddr, iova, len);
 }
 
+/* wrapper to calculate the memory size required to store given number
+ * of objects */
+ssize_t
+rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+                               uint32_t obj_num, uint32_t pg_shift,
+                               size_t *min_chunk_size, size_t *align)
+{
+       struct rte_mempool_ops *ops;
+
+       ops = rte_mempool_get_ops(mp->ops_index);
+
+       if (ops->calc_mem_size == NULL)
+               return rte_mempool_calc_mem_size_def(mp, obj_num, pg_shift,
+                                                    min_chunk_size, align);
+
+       return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
+}
+
 /* sets mempool ops previously registered by rte_mempool_register_ops. */
 int
 rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index 62b76f9..9fa7270 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -51,3 +51,11 @@ DPDK_17.11 {
        rte_mempool_populate_iova_tab;
 
 } DPDK_16.07;
+
+DPDK_18.05 {
+       global:
+
+       rte_mempool_calc_mem_size_def;
+
+} DPDK_17.11;
+
-- 
2.7.4