Allow creating a mempool with a page-size-aligned base address.

Signed-off-by: Qi Zhang <qi.z.zh...@intel.com>
---
 lib/librte_mempool/rte_mempool.c | 2 ++
 lib/librte_mempool/rte_mempool.h | 1 +
 2 files changed, 3 insertions(+)
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 54f7f4ba4..f8d4814ad 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -567,6 +567,8 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		pg_shift = 0; /* not needed, zone is physically contiguous */
 		pg_sz = 0;
 		align = RTE_CACHE_LINE_SIZE;
+		if (mp->flags & MEMPOOL_F_PAGE_ALIGN)
+			align = getpagesize();
 	} else {
 		pg_sz = getpagesize();
 		pg_shift = rte_bsf32(pg_sz);
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 8b1b7f7ed..774ab0f66 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -245,6 +245,7 @@ struct rte_mempool {
 #define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/
 #define MEMPOOL_F_POOL_CREATED   0x0010 /**< Internal: pool is created. */
 #define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */
+#define MEMPOOL_F_PAGE_ALIGN     0x0040 /**< Base address is page aligned. */
 /**
  * This capability flag is advertised by a mempool handler, if the whole
  * memory area containing the objects must be physically contiguous.
-- 
2.13.6
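
For context, here is a minimal sketch of how an application could request the new behavior once this flag exists. It uses the standard rte_mempool_create() API; the pool name, object count/size, and cache size are illustrative only, not taken from the patch:

    #include <rte_mempool.h>

    /* Illustrative sizes only; real values depend on the application. */
    #define NB_OBJS  8192
    #define OBJ_SIZE 2048
    #define CACHE_SZ 256

    static struct rte_mempool *
    create_page_aligned_pool(int socket_id)
    {
    	/*
    	 * MEMPOOL_F_PAGE_ALIGN asks rte_mempool_populate_default() to align
    	 * the base address of the pool's memory to getpagesize() instead of
    	 * RTE_CACHE_LINE_SIZE when the zone is physically contiguous.
    	 */
    	return rte_mempool_create("page_aligned_pool", NB_OBJS, OBJ_SIZE,
    				  CACHE_SZ, 0,
    				  NULL, NULL,	/* no pool constructor */
    				  NULL, NULL,	/* no per-object init */
    				  socket_id,
    				  MEMPOOL_F_PAGE_ALIGN);
    }

Without the flag, callers keep the existing cache-line alignment, so default behavior is unchanged.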