I've split up spinlocking-fixes.patch into 3 parts and reworked them.
This is the first one, against BK-curr:

It fixes the BUILD_LOCK_OPS() bug by introducing the following 3 new
locking primitives:

  spin_trylock_test(lock)
  read_trylock_test(lock)
  write_trylock_test(lock)

This is what BUILD_LOCK_OPS() needs: a nonintrusive test to check
whether the real (intrusive) trylock op would succeed. Semantics and
naming are completely symmetric to the trylock counterparts. No
changes to exit.c.
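
For illustration, here is roughly what the fixed
BUILD_LOCK_OPS(spin, spinlock) expands to for the plain lock op. This
is a simplified sketch: the trylock fast path at the top of the loop
is not visible in the hunks below, so the _raw_spin_trylock() call is
reconstructed from context.

	void __lockfunc _spin_lock(spinlock_t *lock)
	{
		preempt_disable();
		for (;;) {
			/* fast path: try to grab the lock atomically */
			if (likely(_raw_spin_trylock(lock)))
				break;
			/*
			 * Contended: re-enable preemption and spin-wait
			 * nonintrusively until the lock looks free,
			 * without dirtying the lock cacheline.
			 */
			preempt_enable();
			if (!(lock)->break_lock)
				(lock)->break_lock = 1;
			while (!spin_trylock_test(lock) && (lock)->break_lock)
				cpu_relax();
			preempt_disable();
		}
	}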

Build/boot-tested on x86. Architectures that want to support PREEMPT
need to add these definitions.
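
For an architecture port, a minimal (conservative) sketch could look
like this, assuming the arch has an i386-style rwlock_is_locked()
test. Note that !rwlock_is_locked() is stricter than necessary for
read_trylock_test() (read_trylock() also succeeds while other readers
hold the lock), so a real port should derive the read test from its
own lock word layout, as the i386 hunk below does:

	/* asm-<arch>/spinlock.h -- hypothetical, conservative sketch */
	#define read_trylock_test(x)	(!rwlock_is_locked(x))
	#define write_trylock_test(x)	(!rwlock_is_locked(x))

(spin_trylock_test() itself needs no per-arch work: the generic
definition added to linux/spinlock.h below is expressed via
spin_is_locked().)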

        Ingo

Signed-off-by: Ingo Molnar <[EMAIL PROTECTED]>

--- linux/kernel/spinlock.c.orig
+++ linux/kernel/spinlock.c
@@ -173,8 +173,8 @@ EXPORT_SYMBOL(_write_lock);
  * (We do this in a function because inlining it would be excessive.)
  */
 
-#define BUILD_LOCK_OPS(op, locktype, is_locked_fn)                     \
-void __lockfunc _##op##_lock(locktype *lock)                           \
+#define BUILD_LOCK_OPS(op, locktype)                                   \
+void __lockfunc _##op##_lock(locktype##_t *lock)                       \
 {                                                                      \
        preempt_disable();                                              \
        for (;;) {                                                      \
@@ -183,7 +183,7 @@ void __lockfunc _##op##_lock(locktype *l
                preempt_enable();                                       \
                if (!(lock)->break_lock)                                \
                        (lock)->break_lock = 1;                         \
-               while (is_locked_fn(lock) && (lock)->break_lock)        \
+               while (!op##_trylock_test(lock) && (lock)->break_lock)  \
                        cpu_relax();                                    \
                preempt_disable();                                      \
        }                                                               \
@@ -191,7 +191,7 @@ void __lockfunc _##op##_lock(locktype *l
                                                                        \
 EXPORT_SYMBOL(_##op##_lock);                                           \
                                                                        \
-unsigned long __lockfunc _##op##_lock_irqsave(locktype *lock)          \
+unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)      \
 {                                                                      \
        unsigned long flags;                                            \
                                                                        \
@@ -205,7 +205,7 @@ unsigned long __lockfunc _##op##_lock_ir
                preempt_enable();                                       \
                if (!(lock)->break_lock)                                \
                        (lock)->break_lock = 1;                         \
-               while (is_locked_fn(lock) && (lock)->break_lock)        \
+               while (!op##_trylock_test(lock) && (lock)->break_lock)  \
                        cpu_relax();                                    \
                preempt_disable();                                      \
        }                                                               \
@@ -214,14 +214,14 @@ unsigned long __lockfunc _##op##_lock_ir
                                                                        \
 EXPORT_SYMBOL(_##op##_lock_irqsave);                                   \
                                                                        \
-void __lockfunc _##op##_lock_irq(locktype *lock)                       \
+void __lockfunc _##op##_lock_irq(locktype##_t *lock)                   \
 {                                                                      \
        _##op##_lock_irqsave(lock);                                     \
 }                                                                      \
                                                                        \
 EXPORT_SYMBOL(_##op##_lock_irq);                                       \
                                                                        \
-void __lockfunc _##op##_lock_bh(locktype *lock)                        \
+void __lockfunc _##op##_lock_bh(locktype##_t *lock)                    \
 {                                                                      \
        unsigned long flags;                                            \
                                                                        \
@@ -246,9 +246,9 @@ EXPORT_SYMBOL(_##op##_lock_bh)
  *         _[spin|read|write]_lock_irqsave()
  *         _[spin|read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(spin, spinlock_t, spin_is_locked);
-BUILD_LOCK_OPS(read, rwlock_t, rwlock_is_locked);
-BUILD_LOCK_OPS(write, rwlock_t, spin_is_locked);
+BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(read, rwlock);
+BUILD_LOCK_OPS(write, rwlock);
 
 #endif /* CONFIG_PREEMPT */
 
--- linux/include/linux/spinlock.h.orig
+++ linux/include/linux/spinlock.h
@@ -584,4 +584,10 @@ static inline int bit_spin_is_locked(int
 #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
 #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
 
+/**
+ * spin_trylock_test - would spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define spin_trylock_test(lock)                (!spin_is_locked(lock))
+
 #endif /* __LINUX_SPINLOCK_H */
--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -188,6 +188,18 @@ typedef struct {
 
 #define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
 
+/**
+ * read_trylock_test - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define read_trylock_test(x) (atomic_read((atomic_t *)&(x)->lock) > 0)
+
+/**
+ * write_trylock_test - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define write_trylock_test(x) ((x)->lock == RW_LOCK_BIAS)
+
 /*
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.