On 2024-12-02 13:53, David Marchand wrote:
Convert spinlock (and, as a consequence, seqlock) to the clang
capability annotations.
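
For readers new to these macros: they map onto clang's thread safety
capability attributes, so building with -Wthread-safety lets the
compiler check lock usage at compile time. A minimal sketch of how the
converted annotations compose (the struct and functions below are
hypothetical, for illustration only):

#include <stdint.h>
#include <rte_spinlock.h>

struct counter {
	rte_spinlock_t lock;
	uint64_t value; /* protected by lock */
};

/* Callers must hold c->lock; clang flags any call site that doesn't. */
static uint64_t
counter_read(struct counter *c)
	__rte_requires_capability(&c->lock)
{
	return c->value;
}

static uint64_t
counter_snapshot(struct counter *c)
{
	uint64_t v;

	rte_spinlock_lock(&c->lock);	/* acquires the capability */
	v = counter_read(c);		/* ok: capability is held */
	rte_spinlock_unlock(&c->lock);	/* releases the capability */
	return v;
}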


Acked-by: Mattias Rönnblom <mattias.ronnb...@ericsson.com>

Signed-off-by: David Marchand <david.march...@redhat.com>
---
  drivers/bus/dpaa/base/qbman/qman.c     |  4 ++--
  drivers/net/fm10k/fm10k_ethdev.c       |  4 ++--
  lib/eal/include/generic/rte_spinlock.h | 14 +++++++-------
  lib/eal/include/rte_eal_memconfig.h    |  4 ++--
  lib/eal/include/rte_seqlock.h          |  4 ++--
  lib/graph/graph_private.h              |  4 ++--
  lib/vhost/vdpa.c                       |  2 +-
  7 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index f92b25343a..11fabcaff5 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -45,7 +45,7 @@ static inline int fq_isset(struct qman_fq *fq, u32 mask)
  }
static inline void fq_lock(struct qman_fq *fq)
-       __rte_exclusive_lock_function(&fq->fqlock)
+       __rte_acquire_capability(&fq->fqlock)
        __rte_no_thread_safety_analysis
  {
        if (fq_isset(fq, QMAN_FQ_FLAG_LOCKED))
@@ -53,7 +53,7 @@ static inline void fq_lock(struct qman_fq *fq)
  }
static inline void fq_unlock(struct qman_fq *fq)
-       __rte_unlock_function(&fq->fqlock)
+       __rte_release_capability(&fq->fqlock)
        __rte_no_thread_safety_analysis
  {
        if (fq_isset(fq, QMAN_FQ_FLAG_LOCKED))
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7b490bea17..747042d621 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -116,7 +116,7 @@ fm10k_mbx_initlock(struct fm10k_hw *hw)
static void
  fm10k_mbx_lock(struct fm10k_hw *hw)
-       __rte_exclusive_lock_function(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back))
+       __rte_acquire_capability(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back))
  {
        while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
                rte_delay_us(FM10K_MBXLOCK_DELAY_US);
@@ -124,7 +124,7 @@ fm10k_mbx_lock(struct fm10k_hw *hw)
static void
  fm10k_mbx_unlock(struct fm10k_hw *hw)
-       __rte_unlock_function(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back))
+       __rte_release_capability(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back))
  {
        rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
  }
diff --git a/lib/eal/include/generic/rte_spinlock.h b/lib/eal/include/generic/rte_spinlock.h
index c2980601b2..c907d4e45c 100644
--- a/lib/eal/include/generic/rte_spinlock.h
+++ b/lib/eal/include/generic/rte_spinlock.h
@@ -32,7 +32,7 @@ extern "C" {
  /**
   * The rte_spinlock_t type.
   */
-typedef struct __rte_lockable {
+typedef struct __rte_capability("spinlock") {
        volatile RTE_ATOMIC(int) locked; /**< lock status 0 = unlocked, 1 = locked */
  } rte_spinlock_t;
@@ -61,7 +61,7 @@ rte_spinlock_init(rte_spinlock_t *sl)
   */
  static inline void
  rte_spinlock_lock(rte_spinlock_t *sl)
-       __rte_exclusive_lock_function(sl);
+       __rte_acquire_capability(sl);
#ifdef RTE_FORCE_INTRINSICS
  static inline void
@@ -87,7 +87,7 @@ rte_spinlock_lock(rte_spinlock_t *sl)
   */
  static inline void
  rte_spinlock_unlock(rte_spinlock_t *sl)
-       __rte_unlock_function(sl);
+       __rte_release_capability(sl);
#ifdef RTE_FORCE_INTRINSICS
  static inline void
@@ -109,7 +109,7 @@ rte_spinlock_unlock(rte_spinlock_t *sl)
  __rte_warn_unused_result
  static inline int
  rte_spinlock_trylock(rte_spinlock_t *sl)
-       __rte_exclusive_trylock_function(1, sl);
+       __rte_try_acquire_capability(true, sl);
#ifdef RTE_FORCE_INTRINSICS
  static inline int
@@ -158,7 +158,7 @@ static inline int rte_tm_supported(void);
   */
  static inline void
  rte_spinlock_lock_tm(rte_spinlock_t *sl)
-       __rte_exclusive_lock_function(sl);
+       __rte_acquire_capability(sl);
/**
   * Commit hardware memory transaction or release the spinlock if
@@ -169,7 +169,7 @@ rte_spinlock_lock_tm(rte_spinlock_t *sl)
   */
  static inline void
  rte_spinlock_unlock_tm(rte_spinlock_t *sl)
-       __rte_unlock_function(sl);
+       __rte_release_capability(sl);
/**
   * Try to execute critical section in a hardware memory transaction,
@@ -190,7 +190,7 @@ rte_spinlock_unlock_tm(rte_spinlock_t *sl)
  __rte_warn_unused_result
  static inline int
  rte_spinlock_trylock_tm(rte_spinlock_t *sl)
-       __rte_exclusive_trylock_function(1, sl);
+       __rte_try_acquire_capability(true, sl);
/**
   * The rte_spinlock_recursive_t type.
diff --git a/lib/eal/include/rte_eal_memconfig.h b/lib/eal/include/rte_eal_memconfig.h
index 0b1d0d4ff0..55d78de334 100644
--- a/lib/eal/include/rte_eal_memconfig.h
+++ b/lib/eal/include/rte_eal_memconfig.h
@@ -132,14 +132,14 @@ rte_mcfg_mempool_write_unlock(void)
   */
  void
  rte_mcfg_timer_lock(void)
-       __rte_exclusive_lock_function(rte_mcfg_timer_get_lock());
+       __rte_acquire_capability(rte_mcfg_timer_get_lock());
/**
   * Unlock the internal EAL Timer Library lock for exclusive access.
   */
  void
  rte_mcfg_timer_unlock(void)
-       __rte_unlock_function(rte_mcfg_timer_get_lock());
+       __rte_release_capability(rte_mcfg_timer_get_lock());
/**
   * If true, pages are put in single files (per memseg list),
diff --git a/lib/eal/include/rte_seqlock.h b/lib/eal/include/rte_seqlock.h
index e0e94900d1..670a680443 100644
--- a/lib/eal/include/rte_seqlock.h
+++ b/lib/eal/include/rte_seqlock.h
@@ -197,7 +197,7 @@ rte_seqlock_read_retry(const rte_seqlock_t *seqlock, uint32_t begin_sn)
   */
  static inline void
  rte_seqlock_write_lock(rte_seqlock_t *seqlock)
-       __rte_exclusive_lock_function(&seqlock->lock)
+       __rte_acquire_capability(&seqlock->lock)
  {
        /* To synchronize with other writers. */
        rte_spinlock_lock(&seqlock->lock);
@@ -219,7 +219,7 @@ rte_seqlock_write_lock(rte_seqlock_t *seqlock)
   */
  static inline void
  rte_seqlock_write_unlock(rte_seqlock_t *seqlock)
-       __rte_unlock_function(&seqlock->lock)
+       __rte_release_capability(&seqlock->lock)
  {
        rte_seqcount_write_end(&seqlock->count);
diff --git a/lib/graph/graph_private.h b/lib/graph/graph_private.h
index da48d73587..813dd78b9d 100644
--- a/lib/graph/graph_private.h
+++ b/lib/graph/graph_private.h
@@ -221,7 +221,7 @@ graph_spinlock_get(void);
   * Take a lock on the graph internal spin lock.
   */
  void graph_spinlock_lock(void)
-       __rte_exclusive_lock_function(graph_spinlock_get());
+       __rte_acquire_capability(graph_spinlock_get());
/**
   * @internal
@@ -229,7 +229,7 @@ void graph_spinlock_lock(void)
   * Release a lock on the graph internal spin lock.
   */
  void graph_spinlock_unlock(void)
-       __rte_unlock_function(graph_spinlock_get());
+       __rte_release_capability(graph_spinlock_get());
/* Graph operations */
  /**
diff --git a/lib/vhost/vdpa.c b/lib/vhost/vdpa.c
index 8abb073675..329dd572b7 100644
--- a/lib/vhost/vdpa.c
+++ b/lib/vhost/vdpa.c
@@ -32,7 +32,7 @@ static struct vdpa_device_list * const vdpa_device_list
static struct rte_vdpa_device *
  __vdpa_find_device_by_name(const char *name)
-       __rte_exclusive_locks_required(&vdpa_device_list_lock)
+       __rte_requires_capability(&vdpa_device_list_lock)
  {
        struct rte_vdpa_device *dev, *ret = NULL;
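
One usage note on the trylock conversion: __rte_try_acquire_capability
expresses the success value as a bool (true) where the old
__rte_exclusive_trylock_function took the int 1. The analysis considers
the capability held only on the branch where the trylock succeeded, as
in this sketch (the caller below is hypothetical):

static int
update_if_uncontended(rte_spinlock_t *sl, int *guarded)
{
	if (!rte_spinlock_trylock(sl))
		return -1;	/* capability not acquired */
	(*guarded)++;		/* capability held on this branch only */
	rte_spinlock_unlock(sl);
	return 0;
}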
