On Thu, Sep 7, 2023 at 5:48 PM Stephen Hemminger
<step...@networkplumber.org> wrote:
>
> On Thu,  7 Sep 2023 08:24:56 -0700
> Stephen Hemminger <step...@networkplumber.org> wrote:
>
> >
> > +static __rte_always_inline
> > +void __rte_rand_put_state(struct rte_rand_state *state)
> > +{
> > +     if (state == &rand_states[RTE_MAX_LCORE])
> > +             rte_spinlock_unlock(&rte_rand_lock);
> > +}
>
> Conditional locking like this makes the clang lock analyzer unhappy, though.
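
For context: clang's -Wthread-safety analysis cannot express a lock that
is taken only on some paths, which is exactly what
__rte_rand_get_state()/__rte_rand_put_state() do for unregistered
non-EAL threads. A minimal sketch of the pattern it rejects, using
illustrative names (not the actual DPDK __rte_* annotation macros):

struct __attribute__((capability("mutex"))) toy_lock { int held; };

void toy_lock_take(struct toy_lock *l)
	__attribute__((acquire_capability(l)));
void toy_lock_give(struct toy_lock *l)
	__attribute__((release_capability(l)));

static struct toy_lock g_lock;

void maybe_locked(int unregistered)
{
	if (unregistered)
		toy_lock_take(&g_lock);
	/* -Wthread-safety: mutex 'g_lock' is not held on every path
	 * through here */
	if (unregistered)
		toy_lock_give(&g_lock);
}

The analysis merges both branches after the first if; since the lock
state differs between them, it assumes the lock is not held and flags
the conditional release as releasing a mutex that was not held.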

Ugly, but a macro can do the job...

diff --git a/lib/eal/common/rte_random.c b/lib/eal/common/rte_random.c
index 02b6b6b97b..3f2a4830fd 100644
--- a/lib/eal/common/rte_random.c
+++ b/lib/eal/common/rte_random.c
@@ -128,20 +128,22 @@ struct rte_rand_state *__rte_rand_get_state(void)
        idx = rte_lcore_id();

        /* last instance reserved for unregistered non-EAL threads */
-       if (unlikely(idx == LCORE_ID_ANY)) {
+       if (unlikely(idx == LCORE_ID_ANY))
                idx = RTE_MAX_LCORE;
-               rte_spinlock_lock(&rte_rand_lock);
-       }

        return &rand_states[idx];
 }

-static __rte_always_inline
-void __rte_rand_put_state(struct rte_rand_state *state)
-{
-       if (state == &rand_states[RTE_MAX_LCORE])
-               rte_spinlock_unlock(&rte_rand_lock);
-}
+#define PROTECT_NON_EAL_THREADS(...) do { \
+       unsigned int idx = rte_lcore_id(); \
+       if (idx == LCORE_ID_ANY) { \
+               rte_spinlock_lock(&rte_rand_lock); \
+               __VA_ARGS__ \
+               rte_spinlock_unlock(&rte_rand_lock); \
+       } else { \
+               __VA_ARGS__ \
+       } \
+} while (0)

 uint64_t
 rte_rand(void)
@@ -149,9 +151,10 @@ rte_rand(void)
        struct rte_rand_state *state;
        uint64_t res;

+       PROTECT_NON_EAL_THREADS(
        state = __rte_rand_get_state();
        res = __rte_rand_lfsr258(state);
-       __rte_rand_put_state(state);
+       );

        return res;
 }
@@ -168,6 +171,7 @@ rte_rand_max(uint64_t upper_bound)
        if (unlikely(upper_bound < 2))
                return 0;

+       PROTECT_NON_EAL_THREADS(
        state = __rte_rand_get_state();

        ones = rte_popcount64(upper_bound);
@@ -192,7 +196,7 @@ rte_rand_max(uint64_t upper_bound)
                        res = __rte_rand_lfsr258(state) & mask;
                } while (unlikely(res >= upper_bound));
        }
-       __rte_rand_put_state(state);
+       );

        return res;
 }
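
For reference, this is roughly what rte_rand() becomes once the
preprocessor pastes the critical section in through __VA_ARGS__
(hand-expanded, whitespace tidied):

uint64_t
rte_rand(void)
{
	struct rte_rand_state *state;
	uint64_t res;

	do {
		unsigned int idx = rte_lcore_id();

		if (idx == LCORE_ID_ANY) {
			/* unregistered non-EAL thread: serialize access */
			rte_spinlock_lock(&rte_rand_lock);
			state = __rte_rand_get_state();
			res = __rte_rand_lfsr258(state);
			rte_spinlock_unlock(&rte_rand_lock);
		} else {
			state = __rte_rand_get_state();
			res = __rte_rand_lfsr258(state);
		}
	} while (0);

	return res;
}

Each lock/unlock pair is now unconditional within its branch, which is a
shape the clang analysis can verify. The do { } while (0) wrapper makes
the macro usable as a single statement; the cost is that the wrapped
statements are emitted twice, and rte_lcore_id() is evaluated once in
the macro and again inside __rte_rand_get_state().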


-- 
David Marchand
