rte_mempool_populate_virt() currently requires that both addr
and len be page-aligned.

Remove this unneeded constraint, which can be annoying with big
hugepages (e.g. 1GB).
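
For illustration only (this sketch is not part of the change), a call
that the relaxed check now accepts; the mempool "mp", the 1GB hugepage
mapping "hugepage_va" and the no-op free callback are assumptions made
for the example:

  #include <rte_common.h>
  #include <rte_memory.h>
  #include <rte_mempool.h>

  /* Free callback that does nothing; the sketch does not own the mapping. */
  static void
  sketch_free_cb(struct rte_mempool_memhdr *memhdr __rte_unused,
                 void *opaque __rte_unused)
  {
  }

  /* Populate from an address and length that are not page-aligned,
   * which is accepted once the constraint is removed. */
  static int
  sketch_populate(struct rte_mempool *mp, char *hugepage_va)
  {
          const size_t pg_sz = RTE_PGSIZE_1G;
          char *addr = hugepage_va + 64;   /* not page-aligned */
          size_t len = pg_sz - 64;         /* not a multiple of pg_sz */

          return rte_mempool_populate_virt(mp, addr, len, pg_sz,
                                           sketch_free_cb, NULL);
  }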

Signed-off-by: Olivier Matz <olivier.m...@6wind.com>
---
 lib/librte_mempool/rte_mempool.c | 18 +++++++-----------
 lib/librte_mempool/rte_mempool.h |  3 +--
 2 files changed, 8 insertions(+), 13 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 0f29e8712..76cbacdf3 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -368,17 +368,11 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
        size_t off, phys_len;
        int ret, cnt = 0;
 
-       /* address and len must be page-aligned */
-       if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr)
-               return -EINVAL;
-       if (RTE_ALIGN_CEIL(len, pg_sz) != len)
-               return -EINVAL;
-
        if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
                return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
                        len, free_cb, opaque);
 
-       for (off = 0; off + pg_sz <= len &&
+       for (off = 0; off < len &&
                     mp->populated_size < mp->size; off += phys_len) {
 
                iova = rte_mem_virt2iova(addr + off);
@@ -389,7 +383,10 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
                }
 
                /* populate with the largest group of contiguous pages */
-               for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
+               for (phys_len = RTE_PTR_ALIGN_CEIL(addr + off + 1, pg_sz) -
+                            (addr + off);
+                    off + phys_len < len;
+                    phys_len = RTE_MIN(phys_len + pg_sz, len - off)) {
                        rte_iova_t iova_tmp;
 
                        iova_tmp = rte_mem_virt2iova(addr + off + phys_len);
@@ -575,8 +572,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
                         * have
                         */
                        mz = rte_memzone_reserve_aligned(mz_name, 0,
-                                       mp->socket_id, flags,
-                                       RTE_MAX(pg_sz, align));
+                                       mp->socket_id, flags, align);
                }
                if (mz == NULL) {
                        ret = -rte_errno;
@@ -601,7 +597,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
                                (void *)(uintptr_t)mz);
                else
                        ret = rte_mempool_populate_virt(mp, mz->addr,
-                               RTE_ALIGN_FLOOR(mz->len, pg_sz), pg_sz,
+                               mz->len, pg_sz,
                                rte_mempool_memchunk_mz_free,
                                (void *)(uintptr_t)mz);
                if (ret < 0) {
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 8053f7a04..0fe8aa7b8 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1042,9 +1042,8 @@ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
  *   A pointer to the mempool structure.
  * @param addr
  *   The virtual address of memory that should be used to store objects.
- *   Must be page-aligned.
  * @param len
- *   The length of memory in bytes. Must be page-aligned.
+ *   The length of memory in bytes.
  * @param pg_sz
  *   The size of memory pages in this virtual area.
  * @param free_cb
-- 
2.20.1
