Some mempool hardware, such as the octeontx/fpa block, demands block-size-aligned buffer addresses.
Introduce a MEMPOOL_F_POOL_BLK_SZ_ALIGNED flag. If this flag is set:
1) Adjust the 'off' value so that it is aligned to the block size.
2) Allocate one additional buffer. This buffer is used to make sure that
   the requested 'n' buffers get correctly populated to the mempool.

Example:
  elem_sz  = 2432     // total element size
  n        = 2111     // requested number of buffers
  off      = 2304     // new buf_offset value after step 1)
  vaddr    = 0x0      // actual start address of pool
  pool_len = 5133952  // total pool length, i.e. (elem_sz * n)

Since 'off' is non-zero, the condition below fails in the block size
aligned case:

  (((vaddr + off) + (elem_sz * n)) <= (vaddr + pool_len))

which is incorrect behavior. The additional buffer solves this problem
and lets exactly 'n' buffers be populated to the mempool in aligned
mode.

Signed-off-by: Santosh Shukla <santosh.shu...@caviumnetworks.com>
Signed-off-by: Jerin Jacob <jerin.ja...@caviumnetworks.com>
---
 lib/librte_mempool/rte_mempool.c | 19 ++++++++++++++++---
 lib/librte_mempool/rte_mempool.h |  1 +
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 7dec2f51d..2010857f0 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -350,7 +350,7 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 {
 	unsigned total_elt_sz;
 	unsigned i = 0;
-	size_t off;
+	size_t off, delta;
 	struct rte_mempool_memhdr *memhdr;
 	int ret;
 
@@ -387,7 +387,15 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 	memhdr->free_cb = free_cb;
 	memhdr->opaque = opaque;
 
-	if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+	if (mp->flags & MEMPOOL_F_POOL_BLK_SZ_ALIGNED) {
+		delta = (uintptr_t)vaddr % total_elt_sz;
+		off = total_elt_sz - delta;
+		/* Validate alignment */
+		if (((uintptr_t)vaddr + off) % total_elt_sz) {
+			RTE_LOG(ERR, MEMPOOL, "vaddr(%p) not aligned to total_elt_sz(%u)\n", (vaddr + off), total_elt_sz);
+			return -EINVAL;
+		}
+	} else if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
 		off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
 	else
 		off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
@@ -555,8 +563,13 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	}
 
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
 	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
-		size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift);
+		if (mp->flags & MEMPOOL_F_POOL_BLK_SZ_ALIGNED)
+			size = rte_mempool_xmem_size(n + 1, total_elt_sz,
+						     pg_shift);
+		else
+			size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift);
 
 		ret = snprintf(mz_name, sizeof(mz_name),
 			RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index fd8722e69..99a20263d 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -267,6 +267,7 @@ struct rte_mempool {
 #define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */
 #define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */
 #define MEMPOOL_F_POOL_CONTIG 0x0040 /**< Detect physcially contiguous objs */
+#define MEMPOOL_F_POOL_BLK_SZ_ALIGNED 0x0080 /**< Align buffer address to block size*/
 
 /**
  * @internal When debug is enabled, store some statistics.
-- 
2.13.0
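
For reference, here is a minimal standalone sketch (plain C, not part of the
patch or of DPDK) of the capacity arithmetic described in the commit message.
elem_sz and n are taken from the example above; the pool start address of
0x80 is an assumption chosen so that the computed offset comes out to the
off = 2304 value used in the example.

/*
 * Sketch only: shows why one extra element is needed when the start
 * offset is rounded up to the block (element) size.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uintptr_t vaddr = 0x80;   /* assumed (unaligned) pool start */
	const size_t elem_sz = 2432;    /* total element size, from example */
	const size_t n = 2111;          /* requested number of elements */
	const size_t pool_len = elem_sz * n;            /* 5133952 bytes */
	const size_t off = elem_sz - (vaddr % elem_sz); /* 2432 - 128 = 2304 */

	/* Capacity check analogous to the condition quoted above. */
	int fits_without_extra =
		(vaddr + off + elem_sz * n) <= (vaddr + pool_len);
	int fits_with_extra =
		(vaddr + off + elem_sz * n) <= (vaddr + elem_sz * (n + 1));

	printf("off = %zu\n", off);
	printf("fits without extra element: %d\n", fits_without_extra);
	printf("fits with one extra element: %d\n", fits_with_extra);
	return 0;
}

Compiled with any C99 compiler, this prints 0 for the first check and 1 for
the second: the aligned offset eats into the (elem_sz * n) budget, and only
the additional element restores room for all 'n' objects.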