From: Christoph Lameter <[EMAIL PROTECTED]>
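
Provide __local_begin()/__local_end() to establish the state that the
non-atomic __local_xx operations rely on, and add non-atomic
__local_cmpxchg()/__local_xchg() primitives.  The asm-generic fallback
saves and disables interrupts; i386 and x86_64 map the new primitives
onto cmpxchg_local/xchg and only need to disable preemption.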

Signed-off-by: Peter Zijlstra <[EMAIL PROTECTED]>
---
 include/asm-generic/local.h |   25 +++++++++++++++++++++++--
 include/asm-i386/local.h    |   13 +++++++++++++
 include/asm-x86_64/local.h  |   16 ++++++++++++++++
 3 files changed, 52 insertions(+), 2 deletions(-)

Index: linux-2.6.22-rc6-mm1/include/asm-generic/local.h
===================================================================
--- linux-2.6.22-rc6-mm1.orig/include/asm-generic/local.h	2007-07-12 19:44:18.000000000 -0700
+++ linux-2.6.22-rc6-mm1/include/asm-generic/local.h	2007-07-12 19:44:57.000000000 -0700
@@ -46,13 +46,34 @@ typedef struct
 #define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
 
-/* Non-atomic variants, ie. preemption disabled and won't be touched
- * in interrupt, etc.  Some archs can optimize this case well. */
+/*
+ * Establish the state necessary for the __local_xx operations to work.
+ */
+#define __local_begin(flags)   local_irq_save(flags)
+
+static inline void __local_end(unsigned long flags)
+{
+       local_irq_restore(flags);
+}
+
+/*
+ * Non-atomic variants, i.e. used within __local_begin()/__local_end()
+ * or preempt_disable()/preempt_enable(), and never touched from
+ * interrupt context.  Some archs can optimize this case well.
+ */
 #define __local_inc(l)         local_set((l), local_read(l) + 1)
 #define __local_dec(l)         local_set((l), local_read(l) - 1)
 #define __local_add(i,l)       local_set((l), local_read(l) + (i))
 #define __local_sub(i,l)       local_set((l), local_read(l) - (i))
 
+#define __local_cmpxchg(v, o, n) (*(v) == (o) ? (*(v) = (n), (o)) : *(v))
+#define __local_xchg(v, n)                                             \
+({                                                                     \
+       __typeof__(*(v)) __x = *(v);                                    \
+       *(v) = (n);                                                     \
+       __x;                                                            \
+})
+
 /* Use these for per-cpu local_t variables: on some archs they are
  * much more efficient than these naive implementations.  Note they take
  * a variable (eg. mystruct.foo), not an address.
Index: linux-2.6.22-rc6-mm1/include/asm-x86_64/local.h
===================================================================
--- linux-2.6.22-rc6-mm1.orig/include/asm-x86_64/local.h	2007-07-12 19:44:18.000000000 -0700
+++ linux-2.6.22-rc6-mm1/include/asm-x86_64/local.h	2007-07-12 19:44:57.000000000 -0700
@@ -9,6 +9,7 @@ typedef struct
        atomic_long_t a;
 } local_t;
 
+
 #define LOCAL_INIT(i)  { ATOMIC_LONG_INIT(i) }
 
 #define local_read(l)  atomic_long_read(&(l)->a)
@@ -181,11 +182,26 @@ static __inline__ long local_sub_return(
 
 /* On x86-64 these are better than the atomic variants on SMP kernels
    because they dont use a lock prefix. */
+
+#define __local_begin(__flags)         \
+do {                                   \
+       (__flags) = 0;                  \
+       preempt_disable();              \
+} while (0)
+
+static inline void __local_end(unsigned long flags) {
+       preempt_enable();
+}
+
 #define __local_inc(l)         local_inc(l)
 #define __local_dec(l)         local_dec(l)
 #define __local_add(i,l)       local_add((i),(l))
 #define __local_sub(i,l)       local_sub((i),(l))
 
+#define __local_cmpxchg                cmpxchg_local
+#define __local_xchg           xchg
+
+
 /* Use these for per-cpu local_t variables: on some archs they are
  * much more efficient than these naive implementations.  Note they take
  * a variable, not an address.
Index: linux-2.6.22-rc6-mm1/include/asm-i386/local.h
===================================================================
--- linux-2.6.22-rc6-mm1.orig/include/asm-i386/local.h	2007-07-12 19:53:00.000000000 -0700
+++ linux-2.6.22-rc6-mm1/include/asm-i386/local.h	2007-07-12 19:55:22.000000000 -0700
@@ -194,12 +194,25 @@ static __inline__ long local_sub_return(
 })
 #define local_inc_not_zero(l) local_add_unless((l), 1, 0)
 
+#define __local_begin(__flags)                                 \
+do {                                                           \
+       (__flags) = 0;                                          \
+       preempt_disable();                                      \
+} while (0)
+
+static inline void __local_end(unsigned long flags) {
+       preempt_enable();
+}
+
 /* On x86, these are no better than the atomic variants. */
 #define __local_inc(l)         local_inc(l)
 #define __local_dec(l)         local_dec(l)
 #define __local_add(i,l)       local_add((i),(l))
 #define __local_sub(i,l)       local_sub((i),(l))
 
+#define __local_cmpxchg                cmpxchg_local
+#define __local_xchg           xchg
+
 /* Use these for per-cpu local_t variables: on some archs they are
  * much more efficient than these naive implementations.  Note they take
  * a variable, not an address.
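
For illustration, here is a minimal usage sketch of the new interface
(the per-cpu counter and the function below are hypothetical, not part
of this patch):

#include <linux/percpu.h>
#include <asm/local.h>

/* Hypothetical per-cpu event counter. */
static DEFINE_PER_CPU(local_t, nr_events);

static void count_event(void)
{
	unsigned long flags;

	/*
	 * Establish the state the __local_* operations rely on:
	 * interrupts disabled with the asm-generic fallback, only
	 * preemption disabled on i386/x86_64.
	 */
	__local_begin(flags);
	__local_inc(&__get_cpu_var(nr_events));
	__local_end(flags);
}

__local_cmpxchg() and __local_xchg() are meant to be used the same way,
inside a __local_begin()/__local_end() section (or with the value
otherwise protected from preemption and interrupt access).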
