On Sun, 2013-06-09 at 12:36 -0700, Paul E. McKenney wrote:
>  
> +#else /* #ifndef CONFIG_TICKET_LOCK_QUEUED */
> +
> +bool tkt_spin_pass(arch_spinlock_t *ap, struct __raw_tickets inc);
> +
> +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
> +{
> +     register struct __raw_tickets inc = { .tail = 2 };
> +
> +     inc = xadd(&lock->tickets, inc);
> +     for (;;) {
> +             if (inc.head == inc.tail || tkt_spin_pass(lock, inc))
> +                     break;
> +             inc.head = ACCESS_ONCE(lock->tickets.head);
> +     }
> +     barrier(); /* smp_mb() on Power or ARM. */
> +}
> +
> +#endif /* #else #ifndef CONFIG_TICKET_LOCK_QUEUED */
> +

To avoid the above code duplication, I would have this instead:

#ifdef CONFIG_TICKET_LOCK_QUEUED

bool tkt_spin_pass(arch_spinlock_t *ap, struct __raw_tickets inc);
#define __TKT_SPIN_INC 2

#else

static inline bool tkt_spin_pass(arch_spinlock_t *ap, struct __raw_tickets inc)
{
        return false;
}

#define __TKT_SPIN_INC 1

#endif

static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
        register struct __raw_tickets inc = { .tail = __TKT_SPIN_INC };

        inc = xadd(&lock->tickets, inc);

        for (;;) {
                if (inc.head == inc.tail || tkt_spin_pass(lock, inc))
                        break;
                cpu_relax();
                inc.head = ACCESS_ONCE(lock->tickets.head);
        }
        barrier();      /* make sure nothing creeps before the lock is taken */
}
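
For what it's worth, here's a minimal user-space sketch of the same
stub-plus-macro trick (names like DEMO_QUEUED, demo_pass() and
demo_lock() are made up, and the xadd()/ACCESS_ONCE() atomics are left
out, so this only illustrates the structure, not the locking itself):

#include <stdbool.h>

struct demo_tickets {
        unsigned short head, tail;
};

#ifdef DEMO_QUEUED

/* Real handoff logic would live out of line, as in the patch. */
bool demo_pass(struct demo_tickets *t);
#define DEMO_INC 2

#else

/* Stub returns constant false, so the compiler drops the call. */
static inline bool demo_pass(struct demo_tickets *t)
{
        return false;
}
#define DEMO_INC 1

#endif

static inline void demo_lock(struct demo_tickets *t)
{
        unsigned short ticket = t->tail;

        t->tail += DEMO_INC;
        while (t->head != ticket && !demo_pass(t))
                ;       /* spin (a real lock would cpu_relax() here) */
}

In the !DEMO_QUEUED case the demo_pass() branch folds away at compile
time, so the single lock body generates the same code as the unqueued
version, which is the point of avoiding the duplication.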

-- Steve

