On Fri, Dec 21, 2012 at 06:50:38PM -0500, Rik van Riel wrote:
> Subject: x86,smp: move waiting on contended ticket lock out of line
> 
> Moving the wait loop for contended locks to its own function allows
> us to add things to that wait loop, without growing the size of the
> kernel text appreciably.
> 
> Signed-off-by: Rik van Riel <r...@redhat.com>
> ---

Reviewed-by: Rafael Aquini <aqu...@redhat.com>
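
For anyone reading along, a minimal userspace sketch of the same structure
may help (illustrative only; demo_spinlock, demo_spin_lock_wait and friends
are made-up names for this example, not the kernel code). The uncontended
fast path stays inline, and only the contended wait loop is pushed into a
separate, non-inlined function:

/* Toy ticket lock with an out-of-line contended wait path. */
#include <stdatomic.h>
#include <immintrin.h>          /* _mm_pause(), x86 only */

struct demo_spinlock {
        atomic_uint head;       /* ticket currently being served */
        atomic_uint tail;       /* next ticket to hand out */
};

/* Out-of-line slow path: spin until our ticket comes up. */
__attribute__((noinline))
static void demo_spin_lock_wait(struct demo_spinlock *lock, unsigned int ticket)
{
        while (atomic_load_explicit(&lock->head, memory_order_acquire) != ticket)
                _mm_pause();    /* rough stand-in for cpu_relax() */
}

/* Inline fast path: take a ticket; call the slow path only if contended. */
static inline void demo_spin_lock(struct demo_spinlock *lock)
{
        unsigned int ticket =
                atomic_fetch_add_explicit(&lock->tail, 1, memory_order_relaxed);

        if (atomic_load_explicit(&lock->head, memory_order_acquire) == ticket)
                return;         /* uncontended: nothing extra in the caller */

        demo_spin_lock_wait(lock, ticket);
}

static inline void demo_spin_unlock(struct demo_spinlock *lock)
{
        atomic_fetch_add_explicit(&lock->head, 1, memory_order_release);
}

The inlined caller only grows by a compare and a call, while anything added
later to the wait loop lives in a single out-of-line copy, which is the
point of the patch.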


>  arch/x86/include/asm/spinlock.h |   13 +++++++------
>  arch/x86/kernel/smp.c           |   14 ++++++++++++++
>  2 files changed, 21 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
> index 33692ea..2a45eb0 100644
> --- a/arch/x86/include/asm/spinlock.h
> +++ b/arch/x86/include/asm/spinlock.h
> @@ -34,6 +34,8 @@
>  # define UNLOCK_LOCK_PREFIX
>  #endif
>  
> +extern void ticket_spin_lock_wait(arch_spinlock_t *, struct __raw_tickets);
> +
>  /*
>   * Ticket locks are conceptually two parts, one indicating the current head of
>   * the queue, and the other indicating the current tail. The lock is acquired
> @@ -53,12 +55,11 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
>  
>       inc = xadd(&lock->tickets, inc);
>  
> -     for (;;) {
> -             if (inc.head == inc.tail)
> -                     break;
> -             cpu_relax();
> -             inc.head = ACCESS_ONCE(lock->tickets.head);
> -     }
> +     if (inc.head == inc.tail)
> +             goto out;
> +
> +     ticket_spin_lock_wait(lock, inc);
> + out:
>       barrier();              /* make sure nothing creeps before the lock is taken */
>  }
>  
> diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
> index 48d2b7d..20da354 100644
> --- a/arch/x86/kernel/smp.c
> +++ b/arch/x86/kernel/smp.c
> @@ -113,6 +113,20 @@ static atomic_t stopping_cpu = ATOMIC_INIT(-1);
>  static bool smp_no_nmi_ipi = false;
>  
>  /*
> + * Wait on a contended ticket spinlock.
> + */
> +void ticket_spin_lock_wait(arch_spinlock_t *lock, struct __raw_tickets inc)
> +{
> +     for (;;) {
> +             cpu_relax();
> +             inc.head = ACCESS_ONCE(lock->tickets.head);
> +
> +             if (inc.head == inc.tail)
> +                     break;
> +     }
> +}
> +
> +/*
>   * this function sends a 'reschedule' IPI to another CPU.
>   * it goes straight through and wastes no time serializing
>   * anything. Worst case is that we lose a reschedule ...
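
The "add things to that wait loop" part of the changelog is also easier to
see with a toy example. Purely as an illustration (again with made-up names,
and not the actual kernel follow-up), the out-of-line helper gives one place
to hang extra policy, e.g. a crude delay proportional to how far back in the
ticket queue the waiter is:

#include <stdatomic.h>
#include <immintrin.h>          /* _mm_pause(), x86 only */

struct demo_spinlock {
        atomic_uint head;
        atomic_uint tail;
};

__attribute__((noinline))
static void demo_spin_lock_wait(struct demo_spinlock *lock, unsigned int ticket)
{
        for (;;) {
                unsigned int head =
                        atomic_load_explicit(&lock->head, memory_order_acquire);

                if (head == ticket)
                        return;

                /*
                 * Example of extra policy in the wait loop: pause roughly
                 * in proportion to how many waiters are ahead of us, so
                 * distant waiters poll the lock less often.
                 */
                for (unsigned int i = ticket - head; i != 0; i--)
                        _mm_pause();
        }
}

None of this touches the inlined fast path, so the caller's text size stays
the same no matter how elaborate the wait loop becomes.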