rte_mempool_create() inserts the tailq entry into the rte_mempool_tailq
list before the mempool is populated, while pool_data is only set
during population. In a multi-process setup, if process A creates a
mempool, process B can find it with rte_mempool_lookup() before
pool_data is set; if B then calls rte_mempool_avail_count(), it
dereferences the unset pool_data and crashes with a segmentation fault.
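
For illustration, a minimal sketch of the secondary-process sequence
that can hit the crash. The pool name "test_pool" is a placeholder, and
this assumes a primary process has created the mempool but has not yet
finished populating it:

    /* Secondary process: sketch of the racy lookup (illustrative only). */
    #include <stdio.h>
    #include <rte_eal.h>
    #include <rte_mempool.h>

    int main(int argc, char **argv)
    {
            struct rte_mempool *mp;

            if (rte_eal_init(argc, argv) < 0)
                    return -1;

            /* Succeeds as soon as the tailq entry exists, even if
             * mp->pool_data has not been set yet.
             */
            mp = rte_mempool_lookup("test_pool");
            if (mp == NULL)
                    return -1;

            /* Reaches into pool_data via the driver's get_count()
             * callback; segfaults if the pool is not populated yet.
             */
            printf("avail: %u\n", rte_mempool_avail_count(mp));

            return 0;
    }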

Fix this by inserting the tailq entry into rte_mempool_tailq only after
the mempool has been populated.

Signed-off-by: Fengnan Chang <changfeng...@bytedance.com>
---
 lib/mempool/rte_mempool.c | 43 ++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 19 deletions(-)

diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
index 4c78071a34..b3a6572fc8 100644
--- a/lib/mempool/rte_mempool.c
+++ b/lib/mempool/rte_mempool.c
@@ -155,6 +155,27 @@ get_min_page_size(int socket_id)
        return wa.min == SIZE_MAX ? (size_t) rte_mem_page_size() : wa.min;
 }
 
+static int
+add_mempool_to_list(struct rte_mempool *mp)
+{
+       struct rte_mempool_list *mempool_list;
+       struct rte_tailq_entry *te = NULL;
+
+       /* try to allocate tailq entry */
+       te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
+       if (te == NULL) {
+               RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
+               return -ENOMEM;
+       }
+
+       te->data = mp;
+       mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+       rte_mcfg_tailq_write_lock();
+       TAILQ_INSERT_TAIL(mempool_list, te, next);
+       rte_mcfg_tailq_write_unlock();
+
+       return 0;
+}
 
 static void
 mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque,
@@ -304,6 +325,9 @@ mempool_ops_alloc_once(struct rte_mempool *mp)
                if (ret != 0)
                        return ret;
                mp->flags |= RTE_MEMPOOL_F_POOL_CREATED;
+               ret = add_mempool_to_list(mp);
+               if (ret != 0)
+                       return ret;
        }
        return 0;
 }
@@ -798,9 +822,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
        int socket_id, unsigned flags)
 {
        char mz_name[RTE_MEMZONE_NAMESIZE];
-       struct rte_mempool_list *mempool_list;
        struct rte_mempool *mp = NULL;
-       struct rte_tailq_entry *te = NULL;
        const struct rte_memzone *mz = NULL;
        size_t mempool_size;
        unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
@@ -820,8 +842,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
                          RTE_CACHE_LINE_MASK) != 0);
 #endif
 
-       mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
-
        /* asked for zero items */
        if (n == 0) {
                rte_errno = EINVAL;
@@ -866,14 +886,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
        private_data_size = (private_data_size +
                             RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
 
-
-       /* try to allocate tailq entry */
-       te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
-       if (te == NULL) {
-               RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
-               goto exit_unlock;
-       }
-
        mempool_size = RTE_MEMPOOL_HEADER_SIZE(mp, cache_size);
        mempool_size += private_data_size;
        mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
@@ -923,20 +935,13 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
                                           cache_size);
        }
 
-       te->data = mp;
-
-       rte_mcfg_tailq_write_lock();
-       TAILQ_INSERT_TAIL(mempool_list, te, next);
-       rte_mcfg_tailq_write_unlock();
        rte_mcfg_mempool_write_unlock();
-
        rte_mempool_trace_create_empty(name, n, elt_size, cache_size,
                private_data_size, flags, mp);
        return mp;
 
 exit_unlock:
        rte_mcfg_mempool_write_unlock();
-       rte_free(te);
        rte_mempool_free(mp);
        return NULL;
 }
-- 
2.37.0 (Apple Git-136)
