On 26-Mar-18 5:09 PM, Andrew Rybchenko wrote:
The size of the memory chunk required to populate mempool objects depends
on how the objects are stored in memory. Different mempool drivers may
have different requirements, and a new operation allows a driver to
calculate the memory size in accordance with its requirements and to
advertise its requirements on minimum memory chunk size and alignment
in a generic way.

Bump ABI version since the patch breaks it.

Suggested-by: Olivier Matz <olivier.m...@6wind.com>
Signed-off-by: Andrew Rybchenko <arybche...@solarflare.com>
---

Hi Andrew,

<...>
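
As a side note for anyone following the thread: the way I read the patch,
the wrapper that the core code calls below dispatches to the driver's op
and falls back to the default calculation when the driver leaves the slot
empty. A minimal sketch of that dispatch (my paraphrase, not the literal
patch text):

#include <rte_mempool.h>

ssize_t
rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
                uint32_t obj_num, uint32_t pg_shift,
                size_t *min_chunk_size, size_t *align)
{
        struct rte_mempool_ops *ops;

        ops = rte_mempool_get_ops(mp->ops_index);

        /* Drivers that do not fill the new slot keep the pre-existing
         * default behaviour. */
        if (ops->calc_mem_size == NULL)
                return rte_mempool_op_calc_mem_size_default(mp, obj_num,
                                pg_shift, min_chunk_size, align);

        return ops->calc_mem_size(mp, obj_num, pg_shift,
                        min_chunk_size, align);
}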

-       total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
        for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
-               size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift,
-                                               mp->flags);
+               size_t min_chunk_size;
+
+               mem_size = rte_mempool_ops_calc_mem_size(mp, n, pg_shift,
+                               &min_chunk_size, &align);
+               if (mem_size < 0) {
+                       ret = mem_size;
+                       goto fail;
+               }
                ret = snprintf(mz_name, sizeof(mz_name),
                        RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
@@ -606,7 +600,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
                        goto fail;
                }
-               mz = rte_memzone_reserve_aligned(mz_name, size,
+               mz = rte_memzone_reserve_aligned(mz_name, mem_size,
                        mp->socket_id, mz_flags, align);
                /* not enough memory, retry with the biggest zone we have */
                if (mz == NULL)
@@ -617,6 +611,12 @@ rte_mempool_populate_default(struct rte_mempool *mp)
                        goto fail;
                }
+               if (mz->len < min_chunk_size) {
+                       rte_memzone_free(mz);
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+
                if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
                        iova = RTE_BAD_IOVA;
                else
                        iova = mz->iova;

OK by me, but needs to be rebased.

@@ -649,13 +649,14 @@ rte_mempool_populate_default(struct rte_mempool *mp)
  static size_t
  get_anon_size(const struct rte_mempool *mp)
  {
-       size_t size, total_elt_sz, pg_sz, pg_shift;
+       size_t size, pg_sz, pg_shift;
+       size_t min_chunk_size;
+       size_t align;
        pg_sz = getpagesize();

<...>

+/**
+ * Calculate memory size required to store given number of objects.
+ *
+ * If mempool objects are not required to be IOVA-contiguous
+ * (the flag MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
+ * virtually contiguous chunk size. Otherwise, if mempool objects must
+ * be IOVA-contiguous (the flag MEMPOOL_F_NO_IOVA_CONTIG is clear),
+ * min_chunk_size defines IOVA-contiguous chunk size.
+ *
+ * @param[in] mp
+ *   Pointer to the memory pool.
+ * @param[in] obj_num
+ *   Number of objects.
+ * @param[in] pg_shift
+ *   LOG2 of the physical page size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ *   Location for minimum size of the memory chunk which may be used to
+ *   store memory pool objects.
+ * @param[out] align
+ *   Location for required memory chunk alignment.
+ * @return
+ *   Required memory size aligned at page boundary.
+ */
+typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
+               uint32_t obj_num, uint32_t pg_shift,
+               size_t *min_chunk_size, size_t *align);
+
+/**
+ * Default way to calculate memory size required to store given number of
+ * objects.
+ *
+ * If page boundaries may be ignored, it is simply the product of the
+ * total object size (including header and trailer) and the number of
+ * objects. Otherwise, it is the number of pages required to store the
+ * given number of objects without crossing page boundaries.
+ *
+ * Note that if the object size is bigger than the page size, it is
+ * assumed that pages are grouped in subsets of physically contiguous
+ * pages big enough to store at least one object.
+ *
+ * If mempool driver requires object addresses to be block size aligned
+ * (MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS), space for one extra element is
+ * reserved to be able to meet the requirement.
+ *
+ * The minimum memory chunk size is either the entire required space,
+ * if the capabilities say that the whole memory area must be physically
+ * contiguous (MEMPOOL_F_CAPA_PHYS_CONTIG), or the maximum of the page
+ * size and the total element size.
+ *
+ * The required memory chunk alignment is the maximum of the page size
+ * and the cache line size.
+ */
+ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+               uint32_t obj_num, uint32_t pg_shift,
+               size_t *min_chunk_size, size_t *align);
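
To make the contract above concrete, here is a sketch of what a
driver-specific implementation could look like, for hypothetical hardware
that needs the whole object array in a single IOVA-contiguous area (the
mydrv_ name is made up; this is not part of the patch):

#include <rte_mempool.h>

static ssize_t
mydrv_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
                uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
{
        size_t total_elt_sz = mp->header_size + mp->elt_size +
                        mp->trailer_size;
        size_t mem_size = total_elt_sz * obj_num;

        /* Page boundaries do not matter here: the hardware needs the
         * whole area to be contiguous anyway. */
        (void)pg_shift;

        *min_chunk_size = mem_size;     /* one contiguous chunk or fail */
        *align = RTE_CACHE_LINE_SIZE;

        return (ssize_t)mem_size;
}

Such a driver would then fill the new calc_mem_size slot of its
struct rte_mempool_ops and register it with MEMPOOL_REGISTER_OPS()
as usual.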

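And a simplified model of the default behaviour documented above, for the
common case of objects smaller than one page (illustration only; the real
rte_mempool_op_calc_mem_size_default() also has to handle multi-page
objects and the capability flags mentioned in the comment):

#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>

static ssize_t
calc_mem_size_model(size_t total_elt_sz, uint32_t obj_num,
                uint32_t pg_shift)
{
        size_t pg_sz, objs_per_page, pg_num;

        if (pg_shift == 0)
                /* Page boundaries ignored: plain product. */
                return (ssize_t)(total_elt_sz * obj_num);

        /* Objects may not cross page boundaries, so count how many
         * whole objects fit in a page and round up to whole pages
         * (assumes total_elt_sz <= page size). */
        pg_sz = (size_t)1 << pg_shift;
        objs_per_page = pg_sz / total_elt_sz;
        pg_num = (obj_num + objs_per_page - 1) / objs_per_page;

        return (ssize_t)(pg_num << pg_shift);
}

For example, with total_elt_sz = 1500 and 4 KiB pages (pg_shift = 12),
two objects fit per page, so 1000 objects need 500 pages, i.e.
500 * 4096 = 2048000 bytes, rather than the unconstrained
1500 * 1000 = 1500000 bytes.
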
For API docs and wording,

Acked-by: Anatoly Burakov <anatoly.bura...@intel.com>

Should be pretty straightforward to rebase, so you should probably keep my ack for v4.

--
Thanks,
Anatoly
