Otherwise cache_flushthresh can be bigger than n, and
a consumer can starve others by keeping every element
either in use or in the cache.

Signed-off-by: Zoltan Kiss <zoltan.kiss@linaro.org>
---
v2: use macro for calculation, with proper casting

 lib/librte_mempool/rte_mempool.c | 8 +++++---
 lib/librte_mempool/rte_mempool.h | 2 +-
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index cf7ed76..5cfb96b 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -68,6 +68,8 @@ static struct rte_tailq_elem rte_mempool_tailq = {
 EAL_REGISTER_TAILQ(rte_mempool_tailq)

 #define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
+#define CALC_CACHE_FLUSHTHRESH(c)      \
+       ((typeof (c))((c) *  CACHE_FLUSHTHRESH_MULTIPLIER))

 /*
  * return the greatest common divisor between a and b (fast algorithm)
@@ -440,7 +442,8 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
        mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);

        /* asked cache too big */
-       if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+       if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
+           CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
                rte_errno = EINVAL;
                return NULL;
        }
@@ -565,8 +568,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
        mp->header_size = objsz.header_size;
        mp->trailer_size = objsz.trailer_size;
        mp->cache_size = cache_size;
-       mp->cache_flushthresh = (uint32_t)
-               (cache_size * CACHE_FLUSHTHRESH_MULTIPLIER);
+       mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);
        mp->private_data_size = private_data_size;

        /* calculate address of the first element for continuous mempool. */
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 9001312..a4a9610 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -468,7 +468,7 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
  *   If cache_size is non-zero, the rte_mempool library will try to
  *   limit the accesses to the common lockless pool, by maintaining a
  *   per-lcore object cache. This argument must be lower or equal to
- *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
+ *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
  *   cache_size to have "n modulo cache_size == 0": if this is
  *   not the case, some elements will always stay in the pool and will
  *   never be used. The access to the per-lcore table is of course
-- 
1.9.1

Reply via email to