From: Vamsi Attunuru <vattun...@marvell.com>

Add a new mempool flag to avoid scattering mbuf memory across page
boundaries. A mempool created with this flag set is populated only with
mbufs that lie entirely within a page; objects that would cross a page
boundary are skipped during populate. To give the populate callback
access to the memzone (and hence the hugepage size),
rte_mempool_populate_iova() now forwards its opaque argument to
rte_mempool_ops_populate() instead of NULL.
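
For context, a minimal usage sketch (hypothetical application code,
untested; the pool name, counts and sizes are illustrative and follow
the usual create-empty/populate sequence):

	#include <rte_lcore.h>
	#include <rte_mbuf.h>
	#include <rte_mempool.h>

	static struct rte_mempool *
	create_pool_no_page_bound(void)
	{
		struct rte_mempool *mp;
		unsigned int elt_size = sizeof(struct rte_mbuf) +
				RTE_MBUF_DEFAULT_BUF_SIZE;

		/* Request page-bounded objects via the new flag. */
		mp = rte_mempool_create_empty("mbufs_no_pg_bound", 8192,
				elt_size, 256,
				sizeof(struct rte_pktmbuf_pool_private),
				rte_socket_id(), MEMPOOL_F_NO_PAGE_BOUND);
		if (mp == NULL)
			return NULL;

		rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
		rte_pktmbuf_pool_init(mp, NULL);

		/* Objects that would cross a page boundary are
		 * skipped during populate. */
		if (rte_mempool_populate_default(mp) < 0) {
			rte_mempool_free(mp);
			return NULL;
		}
		rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
		return mp;
	}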

Signed-off-by: Vamsi Attunuru <vattun...@marvell.com>
---
 lib/librte_mempool/rte_mempool.c             |  2 +-
 lib/librte_mempool/rte_mempool.h             |  2 ++
 lib/librte_mempool/rte_mempool_ops_default.c | 30 ++++++++++++++++++++++++++++
 3 files changed, 33 insertions(+), 1 deletion(-)
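
For reference (note after the cut line, not part of the commit
message): the page-boundary test added below reduces to the following
arithmetic. Standalone, untested sketch; the names and the 2 MB
hugepage size are illustrative:

	#include <stdint.h>
	#include <stdio.h>

	/* An object [addr, addr + elt_sz) is rejected when it spills
	 * past the end of the power-of-two sized page that contains
	 * its first byte. */
	static int
	obj_crosses_page(uintptr_t addr, uintptr_t pg_sz, size_t elt_sz)
	{
		uintptr_t page_end = (addr & ~(pg_sz - 1)) + pg_sz;

		return addr + elt_sz > page_end;
	}

	int
	main(void)
	{
		uintptr_t pg_sz = 2UL * 1024 * 1024; /* 2 MB hugepage */

		/* Ends exactly on the boundary: accepted (prints 0). */
		printf("%d\n", obj_crosses_page(pg_sz - 2048, pg_sz, 2048));
		/* One byte more crosses the page: skipped (prints 1). */
		printf("%d\n", obj_crosses_page(pg_sz - 2048, pg_sz, 2049));
		return 0;
	}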

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 69bd2a6..175a20a 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -338,7 +338,7 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
        i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size,
                (char *)vaddr + off,
                (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off),
-               len - off, mempool_add_elem, NULL);
+               len - off, mempool_add_elem, opaque);
 
        /* not enough room to store one object */
        if (i == 0) {
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 8053f7a..97a1529 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -263,6 +263,8 @@ struct rte_mempool {
 #define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/
 #define MEMPOOL_F_POOL_CREATED   0x0010 /**< Internal: pool is created. */
 #define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */
+#define MEMPOOL_F_NO_PAGE_BOUND  0x0040
+/**< Don't let objs cross page boundaries. */
 #define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */
 
 /**
diff --git a/lib/librte_mempool/rte_mempool_ops_default.c b/lib/librte_mempool/rte_mempool_ops_default.c
index 4e2bfc8..c029e9a 100644
--- a/lib/librte_mempool/rte_mempool_ops_default.c
+++ b/lib/librte_mempool/rte_mempool_ops_default.c
@@ -45,11 +45,29 @@ rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
        return mem_size;
 }
 
+/* Returns -1 if the object crosses a page boundary, else returns 0 */
+static inline int
+mempool_check_obj_bounds(void *obj, uint64_t hugepage_sz, size_t elt_sz)
+{
+       uintptr_t page_end, elt_addr = (uintptr_t)obj;
+       uint32_t pg_shift = rte_bsf32(hugepage_sz);
+       uint64_t page_mask;
+
+       page_mask = ~((1ull << pg_shift) - 1);
+       page_end = (elt_addr & page_mask) + hugepage_sz;
+
+       if (elt_addr + elt_sz > page_end)
+               return -1;
+
+       return 0;
+}
+
 int
 rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
                void *vaddr, rte_iova_t iova, size_t len,
                rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
 {
+       struct rte_memzone *mz;
        size_t total_elt_sz;
        size_t off;
        unsigned int i;
@@ -58,6 +76,18 @@ rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
        for (off = 0, i = 0; off + total_elt_sz <= len && i < max_objs; i++) {
+
+               if (mp->flags & MEMPOOL_F_NO_PAGE_BOUND) {
+                       mz = (struct rte_memzone *)obj_cb_arg;
+                       if (mempool_check_obj_bounds((char *)vaddr + off,
+                                                   mz->hugepage_sz,
+                                                   total_elt_sz) < 0) {
+                               i--; /* Decrement count & skip this obj */
+                               off += total_elt_sz;
+                               continue;
+                       }
+               }
+
                off += mp->header_size;
                obj = (char *)vaddr + off;
                obj_cb(mp, obj_cb_arg, obj,
-- 
2.8.4
