> Add two functions: > - rte_mempool_get_mem_range - get virtual memory range > of the objects in the mempool, > - rte_mempool_get_obj_alignment - get alignment of > objects in the mempool. > > Add two tests that test these new functions.
LGTM in general, few nits/suggestions below. > > Signed-off-by: Paul Szczepanek <paul.szczepa...@arm.com> > Reviewed-by: Jack Bond-Preston <jack.bond-pres...@foss.arm.com> > Reviewed-by: Nathan Brown <nathan.br...@arm.com> > Acked-by: Morten Brørup <m...@smartsharesystems.com> > --- > app/test/test_mempool.c | 71 +++++++++++++++++++++++++++++++++++++++ > lib/mempool/rte_mempool.c | 48 ++++++++++++++++++++++++++ > lib/mempool/rte_mempool.h | 41 ++++++++++++++++++++++ > lib/mempool/version.map | 3 ++ > 4 files changed, 163 insertions(+) > > diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c > index ad7ebd6363..f32d4a3bb9 100644 > --- a/app/test/test_mempool.c > +++ b/app/test/test_mempool.c > @@ -843,12 +843,17 @@ test_mempool(void) > int ret = -1; > uint32_t nb_objs = 0; > uint32_t nb_mem_chunks = 0; > + void *start = NULL; > + size_t length = 0; > + size_t alignment = 0; > + bool ret_bool = false; > struct rte_mempool *mp_cache = NULL; > struct rte_mempool *mp_nocache = NULL; > struct rte_mempool *mp_stack_anon = NULL; > struct rte_mempool *mp_stack_mempool_iter = NULL; > struct rte_mempool *mp_stack = NULL; > struct rte_mempool *default_pool = NULL; > + struct rte_mempool *mp_alignment = NULL; > struct mp_data cb_arg = { > .ret = -1 > }; > @@ -967,6 +972,71 @@ test_mempool(void) > } > rte_mempool_obj_iter(default_pool, my_obj_init, NULL); > > + if (rte_mempool_get_mem_range(default_pool, &start, &length, NULL)) { > + printf("cannot get mem range from default mempool\n"); > + GOTO_ERR(ret, err); > + } > + > + if (rte_mempool_get_mem_range(NULL, NULL, NULL, NULL) != -EINVAL) { > + printf("rte_mempool_get_mem_range failed to return -EINVAL " > + "when passed invalid arguments\n"); > + GOTO_ERR(ret, err); > + } > + > + if (start == NULL || length < (MEMPOOL_SIZE * MEMPOOL_ELT_SIZE)) { > + printf("mem range of default mempool is invalid\n"); > + GOTO_ERR(ret, err); > + } > + > + /* by default mempool objects are aligned by RTE_MEMPOOL_ALIGN */ > + alignment 
= rte_mempool_get_obj_alignment(default_pool); > + if (alignment != RTE_MEMPOOL_ALIGN) { > + printf("rte_mempool_get_obj_alignment returned wrong value, " > + "expected %zu, returned %zu\n", > + (size_t)RTE_MEMPOOL_ALIGN, alignment); > + GOTO_ERR(ret, err); > + } > + > + /* create a mempool with a RTE_MEMPOOL_F_NO_CACHE_ALIGN flag */ > + mp_alignment = rte_mempool_create("test_alignment", > + 1, 8, /* the small size guarantees single memory chunk */ > + 0, 0, NULL, NULL, my_obj_init, NULL, > + SOCKET_ID_ANY, RTE_MEMPOOL_F_NO_CACHE_ALIGN); > + > + if (mp_alignment == NULL) { > + printf("cannot allocate mempool with " > + "RTE_MEMPOOL_F_NO_CACHE_ALIGN flag\n"); > + GOTO_ERR(ret, err); > + } > + > + /* mempool was created with RTE_MEMPOOL_F_NO_CACHE_ALIGN > + * and minimum alignment is expected which is sizeof(uint64_t) > + */ > + alignment = rte_mempool_get_obj_alignment(mp_alignment); > + if (alignment != sizeof(uint64_t)) { > + printf("rte_mempool_get_obj_alignment returned wrong value, " > + "expected %zu, returned %zu\n", > + (size_t)sizeof(uint64_t), alignment); > + GOTO_ERR(ret, err); > + } > + > + alignment = rte_mempool_get_obj_alignment(NULL); > + if (alignment != 0) { > + printf("rte_mempool_get_obj_alignment failed to return 0 for " > + " an invalid mempool\n"); > + GOTO_ERR(ret, err); > + } > + > + if (rte_mempool_get_mem_range(mp_alignment, NULL, NULL, &ret_bool)) { > + printf("cannot get mem range from mempool\n"); > + GOTO_ERR(ret, err); > + } > + > + if (!ret_bool) { > + printf("mempool not contiguous\n"); > + GOTO_ERR(ret, err); > + } > + > /* retrieve the mempool from its name */ > if (rte_mempool_lookup("test_nocache") != mp_nocache) { > printf("Cannot lookup mempool from its name\n"); > @@ -1039,6 +1109,7 @@ test_mempool(void) > rte_mempool_free(mp_stack_mempool_iter); > rte_mempool_free(mp_stack); > rte_mempool_free(default_pool); > + rte_mempool_free(mp_alignment); > > return ret; > } > diff --git a/lib/mempool/rte_mempool.c 
b/lib/mempool/rte_mempool.c > index 12390a2c81..b2551572ed 100644 > --- a/lib/mempool/rte_mempool.c > +++ b/lib/mempool/rte_mempool.c > @@ -1386,6 +1386,54 @@ void rte_mempool_walk(void (*func)(struct rte_mempool > *, void *), > rte_mcfg_mempool_read_unlock(); > } > > +int rte_mempool_get_mem_range(struct rte_mempool *mp, If possible: "const struct rte_mempool *mp". > + void **mem_range_start, size_t *mem_range_length, > + bool *contiguous) Just as a suggestion: instead of having 3 different params, probably create an aggregate struct for them: struct rte_mempool_range { uintptr_t start; size_t length; bool contiguous; }; And then pass pointer to that struct as a parameter to fill > +{ > + if (mp == NULL) > + return -EINVAL; > + > + void *address_low = (void *)UINTPTR_MAX; > + void *address_high = 0; > + size_t address_diff = 0; > + size_t mem_total_size = 0; > + struct rte_mempool_memhdr *hdr; > + > + /* go through memory chunks and find the lowest and highest addresses */ > + STAILQ_FOREACH(hdr, &mp->mem_list, next) { > + if (address_low > hdr->addr) > + address_low = hdr->addr; > + if (address_high < RTE_PTR_ADD(hdr->addr, hdr->len)) > + address_high = RTE_PTR_ADD(hdr->addr, hdr->len); > + mem_total_size += hdr->len; > + } > + > + /* check if mempool was not populated yet (no memory chunks) */ > + if (address_low == (void *)UINTPTR_MAX) > + return -EINVAL; > + > + address_diff = (size_t)RTE_PTR_DIFF(address_high, address_low); > + if (mem_range_start != NULL) > + *mem_range_start = address_low; > + if (mem_range_length != NULL) > + *mem_range_length = address_diff; > + if (contiguous != NULL) > + *contiguous = (mem_total_size == address_diff) ?
true : false; > + > + return 0; > +} > + > +size_t rte_mempool_get_obj_alignment(struct rte_mempool *mp) Again: const struct rte_mempool *mp; > +{ > + if (mp == NULL) > + return 0; > + > + if (mp->flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) > + return sizeof(uint64_t); > + else > + return RTE_MEMPOOL_ALIGN; > +} > + > struct mempool_callback_data { > TAILQ_ENTRY(mempool_callback_data) callbacks; > rte_mempool_event_callback *func; > diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h > index 23fd5c8465..8a97814b39 100644 > --- a/lib/mempool/rte_mempool.h > +++ b/lib/mempool/rte_mempool.h > @@ -1917,6 +1917,47 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, > uint32_t flags, > void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg), > void *arg); > > +/** > + * @warning > + * @b EXPERIMENTAL: this API may change without prior notice. > + * > + * Get information about the memory range used by the mempool. > + * > + * @param[in] mp > + * Pointer to an initialized mempool. > + * @param[out] mem_range_start > + * Returns lowest address in mempool. May be NULL. > + * @param[out] mem_range_length > + * Returns the length of the memory range containing all the > + * virtual addresses in the memory pool. May be NULL. > + * @param[out] contiguous > + * Returns true if virtual addresses in the memory allocated for the > + * mempool are contiguous. May be NULL. > + * @return > + * 0 on success, -EINVAL if mempool is not valid. > + * > + **/ > +__rte_experimental > +int rte_mempool_get_mem_range(struct rte_mempool *mp, > + void **mem_range_start, size_t *mem_range_length, > + bool *contiguous); > + > +/** > + * @warning > + * @b EXPERIMENTAL: this API may change without prior notice. > + * > + * Return object alignment. > + * > + * @param[in] mp > + * Pointer to a mempool. > + * > + * @return > + * Object alignment if mp is valid. 0 if mp is NULL. 
> + * > + **/ > +__rte_experimental > +size_t rte_mempool_get_obj_alignment(struct rte_mempool *mp); > + > /** > * @internal Get page size used for mempool object allocation. > * This function is internal to mempool library and mempool drivers. > diff --git a/lib/mempool/version.map b/lib/mempool/version.map > index 473277400c..02df634b2a 100644 > --- a/lib/mempool/version.map > +++ b/lib/mempool/version.map > @@ -50,6 +50,9 @@ EXPERIMENTAL { > __rte_mempool_trace_get_contig_blocks; > __rte_mempool_trace_default_cache; > __rte_mempool_trace_cache_flush; > + # added in 24.07 > + rte_mempool_get_mem_range; > + rte_mempool_get_obj_alignment; > }; > > INTERNAL { > -- > 2.25.1