> diff --git a/lib/eal/include/meson.build b/lib/eal/include/meson.build
> index 9700494816..48df5f1a21 100644
> --- a/lib/eal/include/meson.build
> +++ b/lib/eal/include/meson.build
> @@ -36,6 +36,7 @@ headers += files(
>          'rte_per_lcore.h',
>          'rte_random.h',
>          'rte_reciprocal.h',
> +        'rte_seqlock.h',
>          'rte_service.h',
>          'rte_service_component.h',
>          'rte_string_fns.h',
> diff --git a/lib/eal/include/rte_seqlock.h b/lib/eal/include/rte_seqlock.h
> new file mode 100644
> index 0000000000..b975ca848a
> --- /dev/null
> +++ b/lib/eal/include/rte_seqlock.h
> @@ -0,0 +1,84 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2022 Ericsson AB
> + */
> +
> +#ifndef _RTE_SEQLOCK_H_
> +#define _RTE_SEQLOCK_H_
> +
> +#include <stdbool.h>
> +#include <stdint.h>
> +
> +#include <rte_atomic.h>
> +#include <rte_branch_prediction.h>
> +#include <rte_spinlock.h>
> +
> +struct rte_seqlock {
> +     uint64_t sn;
> +     rte_spinlock_t lock;
> +};
> +
> +typedef struct rte_seqlock rte_seqlock_t;
> +
> +__rte_experimental
> +void
> +rte_seqlock_init(rte_seqlock_t *seqlock);

Probably worth having a static initializer too.
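
For example, something along these lines (the macro name, and reusing
RTE_SPINLOCK_INITIALIZER for the lock field, is just a sketch, not part
of the patch):

        #define RTE_SEQLOCK_INITIALIZER \
                { .sn = 0, .lock = RTE_SPINLOCK_INITIALIZER }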


> +
> +__rte_experimental
> +static inline uint64_t
> +rte_seqlock_read_begin(const rte_seqlock_t *seqlock)
> +{
> +     /* __ATOMIC_ACQUIRE to prevent loads after (in program order)
> +      * from happening before the sn load. Synchronizes-with the
> +      * store release in rte_seqlock_write_end().
> +      */
> +     return __atomic_load_n(&seqlock->sn, __ATOMIC_ACQUIRE);
> +}
> +
> +__rte_experimental
> +static inline bool
> +rte_seqlock_read_retry(const rte_seqlock_t *seqlock, uint64_t begin_sn)
> +{
> +     uint64_t end_sn;
> +
> +     /* make sure the data loads happen before the sn load */
> +     rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

That's effectively a 'read_end', correct?
If so, shouldn't it be '__ATOMIC_RELEASE' here instead, and
end_sn = __atomic_load_n(..., __ATOMIC_ACQUIRE)
on the line below?
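
For reference, this is how I read the intended read-side usage, with
this second sn load effectively closing the read section (shared_data
and copy are made-up names):

        struct data copy;
        uint64_t sn;

        do {
                sn = rte_seqlock_read_begin(&seqlock);

                /* possibly torn/inconsistent loads of the protected data */
                copy = shared_data;
        } while (rte_seqlock_read_retry(&seqlock, sn));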

> +
> +     end_sn = __atomic_load_n(&seqlock->sn, __ATOMIC_RELAXED);
> +
> +     return unlikely(begin_sn & 1 || begin_sn != end_sn);
> +}
> +
> +__rte_experimental
> +static inline void
> +rte_seqlock_write_begin(rte_seqlock_t *seqlock)
> +{
> +     uint64_t sn;
> +
> +     /* to synchronize with other writers */
> +     rte_spinlock_lock(&seqlock->lock);
> +
> +     sn = seqlock->sn + 1;
> +
> +     __atomic_store_n(&seqlock->sn, sn, __ATOMIC_RELAXED);
> +
> +     /* __ATOMIC_RELEASE to prevent stores after (in program order)
> +      * from happening before the sn store.
> +      */
> +     rte_atomic_thread_fence(__ATOMIC_RELEASE);

I think it needs to be '__ATOMIC_ACQUIRE' here instead of '__ATOMIC_RELEASE'.
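
The ordering at stake being between the sn store above and the data
stores the writer issues between write_begin() and write_end(), e.g.
(shared_data.counter is a made-up example):

        rte_seqlock_write_begin(&seqlock);

        /* data stores that must not be reordered before the odd sn store */
        shared_data.counter++;

        rte_seqlock_write_end(&seqlock);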

> +}
> +
> +__rte_experimental
> +static inline void
> +rte_seqlock_write_end(rte_seqlock_t *seqlock)
> +{
> +     uint64_t sn;
> +
> +     sn = seqlock->sn + 1;
> +
> +     /* synchronizes-with the load acquire in rte_seqlock_read_begin() */
> +     __atomic_store_n(&seqlock->sn, sn, __ATOMIC_RELEASE);
> +
> +     rte_spinlock_unlock(&seqlock->lock);
> +}
> +
