Hi Joyce, > > The spinlock implementation is unfair, some threads may take locks > aggressively while leaving the other threads starving for a long time. > > This patch introduces a ticketlock which gives each waiting thread a > ticket and they can take the lock one by one. First come, first served. > This avoids starvation for too long a time and is more predictable. > > Suggested-by: Jerin Jacob <jer...@marvell.com> > Signed-off-by: Joyce Kong <joyce.k...@arm.com> > Reviewed-by: Gavin Hu <gavin...@arm.com> > Reviewed-by: Ola Liljedahl <ola.liljed...@arm.com> > Reviewed-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com> > --- > MAINTAINERS | 4 + > doc/api/doxy-api-index.md | 1 + > lib/librte_eal/common/Makefile | 2 +- > .../common/include/generic/rte_ticketlock.h | 215 > +++++++++++++++++++++ > lib/librte_eal/common/meson.build | 1 + > 5 files changed, 222 insertions(+), 1 deletion(-) > create mode 100644 lib/librte_eal/common/include/generic/rte_ticketlock.h > > diff --git a/MAINTAINERS b/MAINTAINERS > index 452b8eb..3521271 100644 > --- a/MAINTAINERS > +++ b/MAINTAINERS > @@ -210,6 +210,10 @@ M: Cristian Dumitrescu <cristian.dumitre...@intel.com> > F: lib/librte_eal/common/include/rte_bitmap.h > F: app/test/test_bitmap.c > > +Ticketlock > +M: Joyce Kong <joyce.k...@arm.com> > +F: lib/librte_eal/common/include/generic/rte_ticketlock.h > + > ARM v7 > M: Jan Viktorin <vikto...@rehivetech.com> > M: Gavin Hu <gavin...@arm.com> > diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md > index d95ad56..aacc66b 100644 > --- a/doc/api/doxy-api-index.md > +++ b/doc/api/doxy-api-index.md > @@ -65,6 +65,7 @@ The public API headers are grouped by topics: > [atomic] (@ref rte_atomic.h), > [rwlock] (@ref rte_rwlock.h), > [spinlock] (@ref rte_spinlock.h) > + [ticketlock] (@ref rte_ticketlock.h) > > - **CPU arch**: > [branch prediction] (@ref rte_branch_prediction.h), > diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile > index 
c487201..ac3305c 100644 > --- a/lib/librte_eal/common/Makefile > +++ b/lib/librte_eal/common/Makefile > @@ -20,7 +20,7 @@ INC += rte_bitmap.h rte_vfio.h rte_hypervisor.h rte_test.h > INC += rte_reciprocal.h rte_fbarray.h rte_uuid.h > > GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h > -GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h > +GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h > rte_ticketlock.h > GENERIC_INC += rte_vect.h rte_pause.h rte_io.h > > # defined in mk/arch/$(RTE_ARCH)/rte.vars.mk > diff --git a/lib/librte_eal/common/include/generic/rte_ticketlock.h > b/lib/librte_eal/common/include/generic/rte_ticketlock.h > new file mode 100644 > index 0000000..c5203cb > --- /dev/null > +++ b/lib/librte_eal/common/include/generic/rte_ticketlock.h > @@ -0,0 +1,215 @@ > +/* SPDX-License-Identifier: BSD-3-Clause > + * Copyright(c) 2019 Arm Limited > + */ > + > +#ifndef _RTE_TICKETLOCK_H_ > +#define _RTE_TICKETLOCK_H_ > + > +/** > + * @file > + * > + * RTE ticket locks > + * > + * This file defines an API for ticket locks, which give each waiting > + * thread a ticket and take the lock one by one, first come, first > + * served. > + * > + * All locks must be initialised before use, and only initialised once. > + * > + */ > + > +#ifdef __cplusplus > +extern "C" { > +#endif > + > +#include <rte_common.h> > +#include <rte_lcore.h> > +#include <rte_pause.h> > + > +/** > + * The rte_ticketlock_t type. > + */ > +typedef union { > + uint32_t tickets; > + struct { > + uint16_t current; > + uint16_t next; > + } s; > +} rte_ticketlock_t; > + > +/** > + * A static ticketlock initializer. > + */ > +#define RTE_TICKETLOCK_INITIALIZER { 0 } > + > +/** > + * Initialize the ticketlock to an unlocked state. > + * > + * @param tl > + * A pointer to the ticketlock. 
> + */ > +static inline __rte_experimental void > +rte_ticketlock_init(rte_ticketlock_t *tl) > +{ > + __atomic_store_n(&tl->tickets, 0, __ATOMIC_RELAXED); > +} > + > +/** > + * Take the ticketlock. > + * > + * @param tl > + * A pointer to the ticketlock. > + */ > +static inline __rte_experimental void > +rte_ticketlock_lock(rte_ticketlock_t *tl) > +{ > + uint16_t me = __atomic_fetch_add(&tl->s.next, 1, __ATOMIC_RELAXED); > + while (__atomic_load_n(&tl->s.current, __ATOMIC_ACQUIRE) != me) > + rte_pause(); > +} > + > +/** > + * Release the ticketlock. > + * > + * @param tl > + * A pointer to the ticketlock. > + */ > +static inline __rte_experimental void > +rte_ticketlock_unlock(rte_ticketlock_t *tl) > +{ > + uint16_t i = __atomic_load_n(&tl->s.current, __ATOMIC_RELAXED); > + __atomic_store_n(&tl->s.current, i + 1, __ATOMIC_RELEASE); > +} > + > +/** > + * Try to take the lock. > + * > + * @param tl > + * A pointer to the ticketlock. > + * @return > + * 1 if the lock is successfully taken; 0 otherwise. > + */ > +static inline __rte_experimental int > +rte_ticketlock_trylock(rte_ticketlock_t *tl) > +{ > + rte_ticketlock_t old, new; > + old.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED); > + new.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED);
As a nit: from my point of view, there is no need for a second load, we can just do: new.tickets = old.tickets; Apart from that: Acked-by: Konstantin Ananyev <konstantin.anan...@intel.com> > + new.s.next++; > + if (old.s.next == old.s.current) { > + if (__atomic_compare_exchange_n(&tl->tickets, &old.tickets, > + new.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) > + return 1; > + } > + > + return 0; > +} > +