On Thu, Mar 26, 2026 at 03:10:03PM +0000, Dmitry Ilvokhin wrote:
> Introduce queued_spin_release() as an arch-overridable unlock primitive,
> and make queued_spin_unlock() a generic wrapper around it. This is a
> preparatory refactoring for the next commit, which adds
> contended_release tracepoint instrumentation to queued_spin_unlock().
> 
> Rename the existing arch-specific queued_spin_unlock() overrides on
> x86 (paravirt) and MIPS to queued_spin_release().
> 
> No functional change.
> 
> Signed-off-by: Dmitry Ilvokhin <[email protected]>

Reviewed-by: Paul E. McKenney <[email protected]>
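
For anyone reading along, the net effect is the standard asm-generic
override pattern: an architecture that needs a custom release (mmiowb()
on MIPS, paravirt on x86) defines queued_spin_release before the
generic header is parsed, and queued_spin_unlock() becomes a single
generic wrapper that later instrumentation can hook. A minimal sketch
of the resulting layering, with the bodies condensed from the patch:

	/* arch header, seen first */
	#define queued_spin_release queued_spin_release
	static inline void queued_spin_release(struct qspinlock *lock)
	{
		/* arch-specific release, e.g. mmiowb() before the store */
	}

	/* include/asm-generic/qspinlock.h: fallback if no arch override */
	#ifndef queued_spin_release
	static __always_inline void queued_spin_release(struct qspinlock *lock)
	{
		smp_store_release(&lock->locked, 0);
	}
	#endif

	/* always generic, so there is exactly one unlock entry point */
	static __always_inline void queued_spin_unlock(struct qspinlock *lock)
	{
		queued_spin_release(lock);
	}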

> ---
>  arch/mips/include/asm/spinlock.h         |  6 +++---
>  arch/x86/include/asm/paravirt-spinlock.h |  6 +++---
>  include/asm-generic/qspinlock.h          | 15 ++++++++++++---
>  3 files changed, 18 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
> index 6ce2117e49f6..c349162f15eb 100644
> --- a/arch/mips/include/asm/spinlock.h
> +++ b/arch/mips/include/asm/spinlock.h
> @@ -13,12 +13,12 @@
>  
>  #include <asm-generic/qspinlock_types.h>
>  
> -#define      queued_spin_unlock queued_spin_unlock
> +#define      queued_spin_release queued_spin_release
>  /**
> - * queued_spin_unlock - release a queued spinlock
> + * queued_spin_release - release a queued spinlock
>   * @lock : Pointer to queued spinlock structure
>   */
> -static inline void queued_spin_unlock(struct qspinlock *lock)
> +static inline void queued_spin_release(struct qspinlock *lock)
>  {
>       /* This could be optimised with ARCH_HAS_MMIOWB */
>       mmiowb();
> diff --git a/arch/x86/include/asm/paravirt-spinlock.h b/arch/x86/include/asm/paravirt-spinlock.h
> index 7beffcb08ed6..ac75e0736198 100644
> --- a/arch/x86/include/asm/paravirt-spinlock.h
> +++ b/arch/x86/include/asm/paravirt-spinlock.h
> @@ -49,9 +49,9 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
>                               ALT_NOT(X86_FEATURE_VCPUPREEMPT));
>  }
>  
> -#define queued_spin_unlock queued_spin_unlock
> +#define queued_spin_release queued_spin_release
>  /**
> - * queued_spin_unlock - release a queued spinlock
> + * queued_spin_release - release a queued spinlock
>   * @lock : Pointer to queued spinlock structure
>   *
>   * A smp_store_release() on the least-significant byte.
> @@ -66,7 +66,7 @@ static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
>       pv_queued_spin_lock_slowpath(lock, val);
>  }
>  
> -static inline void queued_spin_unlock(struct qspinlock *lock)
> +static inline void queued_spin_release(struct qspinlock *lock)
>  {
>       kcsan_release();
>       pv_queued_spin_unlock(lock);
> diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
> index bf47cca2c375..df76f34645a0 100644
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -115,12 +115,12 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
>  }
>  #endif
>  
> -#ifndef queued_spin_unlock
> +#ifndef queued_spin_release
>  /**
> - * queued_spin_unlock - release a queued spinlock
> + * queued_spin_release - release a queued spinlock
>   * @lock : Pointer to queued spinlock structure
>   */
> -static __always_inline void queued_spin_unlock(struct qspinlock *lock)
> +static __always_inline void queued_spin_release(struct qspinlock *lock)
>  {
>       /*
>        * unlock() needs release semantics:
> @@ -129,6 +129,15 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
>  }
>  #endif
>  
> +/**
> + * queued_spin_unlock - unlock a queued spinlock
> + * @lock : Pointer to queued spinlock structure
> + */
> +static __always_inline void queued_spin_unlock(struct qspinlock *lock)
> +{
> +     queued_spin_release(lock);
> +}
> +
>  #ifndef virt_spin_lock
>  static __always_inline bool virt_spin_lock(struct qspinlock *lock)
>  {
> -- 
> 2.52.0
> 
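
The wrapper also makes the follow-up change mechanical: the
contended_release tracepoint from the next commit has exactly one
generic function to land in, with no need to touch the arch overrides.
A hypothetical shape (the tracepoint name and the contention check are
my guesses, not taken from this series):

	static __always_inline void queued_spin_unlock(struct qspinlock *lock)
	{
		/* hypothetical: tail/pending bits set means someone is waiting */
		if (atomic_read(&lock->val) & ~_Q_LOCKED_MASK)
			trace_contended_release(lock);
		queued_spin_release(lock);
	}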
