To populate a mempool with a virtual area, the mempool code calls
rte_mempool_populate_iova() for each iova-contiguous area. It happens
(rarely) that this area is too small to store one object. In this case,
rte_mempool_populate_iova() returns an error, which is forwarded by
rte_mempool_populate_virt().

This case should not throw an error in
rte_mempool_populate_virt(). Instead, the area that is too small should
just be ignored.

To fix this issue, change the return value of
rte_mempool_populate_iova() to -ENOBUFS when no object can be populated,
so it can be ignored by the caller. As this would be an API change, add
a compat wrapper to keep the current API unchanged. The wrapper will be
removed for 20.11.

Fixes: 354788b60cfd ("mempool: allow populating with unaligned virtual area")
Cc: stable@dpdk.org

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
---

Is there a simple way to ensure that we won't forget to remove the
wrapper for 20.11? Anatoly suggested that I use versioned symbols, but
it's not clear to me how.

 doc/guides/rel_notes/deprecation.rst |  4 ++++
 lib/librte_mempool/rte_mempool.c     | 28 +++++++++++++++++++++++-----
 lib/librte_mempool/rte_mempool.h     |  5 ++++-
 3 files changed, 31 insertions(+), 6 deletions(-)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index afa94b43e..b6e89d9a2 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -86,3 +86,7 @@ Deprecation Notices
   to set new power environment if power environment was already initialized.
   In this case the function will return -1 unless the environment is unset first
   (using ``rte_power_unset_env``). Other function usage scenarios will not change.
+
+* mempool: starting from v20.11, rte_mempool_populate_iova() will
+  return -ENOBUFS instead of -EINVAL when there is not enough room to
+  store one object.
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 78d8eb941..bda361ce6 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -297,8 +297,8 @@ mempool_ops_alloc_once(struct rte_mempool *mp)
  * zone. Return the number of objects added, or a negative value
  * on error.
  */
-int
-rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+static int
+__rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
        rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
        void *opaque)
 {
@@ -332,7 +332,7 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
                off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_MEMPOOL_ALIGN) - vaddr;
 
        if (off > len) {
-               ret = -EINVAL;
+               ret = -ENOBUFS;
                goto fail;
        }
 
@@ -343,7 +343,7 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 
        /* not enough room to store one object */
        if (i == 0) {
-               ret = -EINVAL;
+               ret = -ENOBUFS;
                goto fail;
        }
 
@@ -356,6 +356,22 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
        return ret;
 }
 
+/* Compat wrapper, to be removed when changing the API is allowed (v20.11). */
+int
+rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+       rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+       void *opaque)
+{
+       int ret;
+
+       ret = __rte_mempool_populate_iova(mp, vaddr, iova, len, free_cb,
+                                       opaque);
+       if (ret == -ENOBUFS)
+               ret = -EINVAL;
+
+       return ret;
+}
+
 static rte_iova_t
 get_iova(void *addr)
 {
@@ -406,8 +422,10 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
                                break;
                }
 
-               ret = rte_mempool_populate_iova(mp, addr + off, iova,
+               ret = __rte_mempool_populate_iova(mp, addr + off, iova,
                        phys_len, free_cb, opaque);
+               if (ret == -ENOBUFS)
+                       continue;
                if (ret < 0)
                        goto fail;
                /* no need to call the free callback for next chunks */
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index f81152af9..c08bb444f 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1108,7 +1108,10 @@ rte_mempool_free(struct rte_mempool *mp);
  * @return
  *   The number of objects added on success.
  *   On error, the chunk is not added in the memory list of the
- *   mempool and a negative errno is returned.
+ *   mempool and a negative errno is returned:
+ *   (-ENOBUFS): not enough room in chunk for one object.
+ *   (-ENOSPC): mempool is already populated.
+ *   (-ENOMEM): allocation failure.
  */
 int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
        rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
-- 
2.20.1

Reply via email to