Dear RT Folks,

I'm pleased to announce the 3.2.53-rt76 stable release.


You can get this release via the git tree at:

  git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git

  branch: v3.2-rt
  Head SHA1: f53d630d9ec8d3a198df235aefde5e135727b74f


Or to build 3.2.53-rt76 directly, the following patches should be applied:

  http://www.kernel.org/pub/linux/kernel/v3.x/linux-3.2.tar.xz

  http://www.kernel.org/pub/linux/kernel/v3.x/patch-3.2.53.xz

  http://www.kernel.org/pub/linux/kernel/projects/rt/3.2/patch-3.2.53-rt76.patch.xz



You can also build from 3.2.53-rt75 by applying the incremental patch:

  http://www.kernel.org/pub/linux/kernel/projects/rt/3.2/incr/patch-3.2.53-rt75-rt76.patch.xz



Enjoy,

-- Steve


Changes from v3.2.53-rt75:

---

Gilad Ben-Yossef (3):
      smp: introduce a generic on_each_cpu_mask() function
      smp: add func to IPI cpus based on parameter func
      fs: only send IPI to invalidate LRU BH when needed

Peter Zijlstra (1):
      lockdep: Correctly annotate hardirq context in irq_exit()

Sebastian Andrzej Siewior (2):
      swait: Add a few more users
      rtmutex: use a trylock for waiter lock in trylock

Steven Rostedt (Red Hat) (1):
      Linux 3.2.53-rt76

Tiejun Chen (1):
      cpu_down: move migrate_enable() back

----
 arch/arm/kernel/smp_tlb.c                  | 20 ++-----
 arch/tile/include/asm/smp.h                |  7 ---
 arch/tile/kernel/smp.c                     | 19 -------
 drivers/net/wireless/orinoco/orinoco_usb.c |  2 +-
 drivers/usb/gadget/f_fs.c                  |  2 +-
 drivers/usb/gadget/inode.c                 |  4 +-
 fs/buffer.c                                | 15 ++++-
 include/linux/smp.h                        | 46 +++++++++++++++
 include/linux/spinlock_rt.h                |  1 +
 kernel/cpu.c                               |  2 +-
 kernel/rtmutex.c                           | 31 ++++++++--
 kernel/smp.c                               | 90 ++++++++++++++++++++++++++++++
 kernel/softirq.c                           | 49 ++++++++++++++--
 kernel/timer.c                             |  2 +-
 localversion-rt                            |  2 +-
 15 files changed, 234 insertions(+), 58 deletions(-)
---------------------------
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 7dcb352..02c5d2c 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -13,18 +13,6 @@
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
 
-static void on_each_cpu_mask(void (*func)(void *), void *info, int wait,
-       const struct cpumask *mask)
-{
-       preempt_disable();
-
-       smp_call_function_many(mask, func, info, wait);
-       if (cpumask_test_cpu(smp_processor_id(), mask))
-               func(info);
-
-       preempt_enable();
-}
-
 /**********************************************************************/
 
 /*
@@ -87,7 +75,7 @@ void flush_tlb_all(void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
        if (tlb_ops_need_broadcast())
-               on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
+               on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
        else
                local_flush_tlb_mm(mm);
 }
@@ -98,7 +86,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = uaddr;
-               on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
+               on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
+                                       &ta, 1);
        } else
                local_flush_tlb_page(vma, uaddr);
 }
@@ -121,7 +110,8 @@ void flush_tlb_range(struct vm_area_struct *vma,
                ta.ta_vma = vma;
                ta.ta_start = start;
                ta.ta_end = end;
-               on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
+               on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
+                                       &ta, 1);
        } else
                local_flush_tlb_range(vma, start, end);
 }
diff --git a/arch/tile/include/asm/smp.h b/arch/tile/include/asm/smp.h
index 532124a..1aa759a 100644
--- a/arch/tile/include/asm/smp.h
+++ b/arch/tile/include/asm/smp.h
@@ -43,10 +43,6 @@ void evaluate_message(int tag);
 /* Boot a secondary cpu */
 void online_secondary(void);
 
-/* Call a function on a specified set of CPUs (may include this one). */
-extern void on_each_cpu_mask(const struct cpumask *mask,
-                            void (*func)(void *), void *info, bool wait);
-
 /* Topology of the supervisor tile grid, and coordinates of boot processor */
 extern HV_Topology smp_topology;
 
@@ -91,9 +87,6 @@ void print_disabled_cpus(void);
 
 #else /* !CONFIG_SMP */
 
-#define on_each_cpu_mask(mask, func, info, wait)               \
-  do { if (cpumask_test_cpu(0, (mask))) func(info); } while (0)
-
 #define smp_master_cpu         0
 #define smp_height             1
 #define smp_width              1
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index c52224d..a44e103 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -87,25 +87,6 @@ void send_IPI_allbutself(int tag)
        send_IPI_many(&mask, tag);
 }
 
-
-/*
- * Provide smp_call_function_mask, but also run function locally
- * if specified in the mask.
- */
-void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
-                     void *info, bool wait)
-{
-       int cpu = get_cpu();
-       smp_call_function_many(mask, func, info, wait);
-       if (cpumask_test_cpu(cpu, mask)) {
-               local_irq_disable();
-               func(info);
-               local_irq_enable();
-       }
-       put_cpu();
-}
-
-
 /*
  * Functions related to starting/stopping cpus.
  */
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 0793e42..d45833f 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -714,7 +714,7 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
                        while (!ctx->done.done && msecs--)
                                udelay(1000);
                } else {
-                       wait_event_interruptible(ctx->done.wait,
+                       swait_event_interruptible(ctx->done.wait,
                                                 ctx->done.done);
                }
                break;
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 0e641a1..c6154ca 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1270,7 +1270,7 @@ static void ffs_data_put(struct ffs_data *ffs)
                BUG_ON(mutex_is_locked(&ffs->mutex) ||
                       spin_is_locked(&ffs->ev.waitq.lock) ||
                       waitqueue_active(&ffs->ev.waitq) ||
-                      waitqueue_active(&ffs->ep0req_completion.wait));
+                      swaitqueue_active(&ffs->ep0req_completion.wait));
                kfree(ffs);
        }
 }
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 7138540..616d19e 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -339,7 +339,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
        spin_unlock_irq (&epdata->dev->lock);
 
        if (likely (value == 0)) {
-               value = wait_event_interruptible (done.wait, done.done);
+               value = swait_event_interruptible (done.wait, done.done);
                if (value != 0) {
                        spin_lock_irq (&epdata->dev->lock);
                        if (likely (epdata->ep != NULL)) {
@@ -348,7 +348,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
                                usb_ep_dequeue (epdata->ep, epdata->req);
                                spin_unlock_irq (&epdata->dev->lock);
 
-                               wait_event (done.wait, done.done);
+                               swait_event (done.wait, done.done);
                                if (epdata->status == -ECONNRESET)
                                        epdata->status = -EINTR;
                        } else {
diff --git a/fs/buffer.c b/fs/buffer.c
index 997b199..36d49f1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1425,10 +1425,23 @@ static void invalidate_bh_lru(void *arg)
        }
        put_cpu_var(bh_lrus);
 }
+
+static bool has_bh_in_lru(int cpu, void *dummy)
+{
+       struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
+       int i;
+
+       for (i = 0; i < BH_LRU_SIZE; i++) {
+               if (b->bhs[i])
+                       return 1;
+       }
+
+       return 0;
+}
+
 void invalidate_bh_lrus(void)
 {
-       on_each_cpu(invalidate_bh_lru, NULL, 1);
+       on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 78fd0a2..3001ba5 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -101,6 +101,22 @@ static inline void call_function_init(void) { }
 int on_each_cpu(smp_call_func_t func, void *info, int wait);
 
 /*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+               void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+               smp_call_func_t func, void *info, bool wait,
+               gfp_t gfp_flags);
+
+/*
  * Mark the boot cpu "online" so that it can call console drivers in
  * printk() and can access its per-cpu storage.
  */
@@ -131,6 +147,36 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
                local_irq_enable();             \
                0;                              \
        })
+/*
+ * Note we still need to test the mask even for UP
+ * because we actually can get an empty mask from
+ * code that on SMP might call us without the local
+ * CPU in the mask.
+ */
+#define on_each_cpu_mask(mask, func, info, wait) \
+       do {                                            \
+               if (cpumask_test_cpu(0, (mask))) {      \
+                       local_irq_disable();            \
+                       (func)(info);                   \
+                       local_irq_enable();             \
+               }                                       \
+       } while (0)
+/*
+ * Preemption is disabled here to make sure the cond_func is called under the
+ * same conditions in UP and SMP.
+ */
+#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
+       do {                                                    \
+               void *__info = (info);                          \
+               preempt_disable();                              \
+               if ((cond_func)(0, __info)) {                   \
+                       local_irq_disable();                    \
+                       (func)(__info);                         \
+                       local_irq_enable();                     \
+               }                                               \
+               preempt_enable();                               \
+       } while (0)
+
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()                     1
 #define smp_prepare_boot_cpu()                 do {} while (0)
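
For illustration only (this is not part of the patch): a minimal sketch of
how a caller might use the new on_each_cpu_mask() helper; the demo_* names
are hypothetical.

    #include <linux/kernel.h>
    #include <linux/smp.h>
    #include <linux/cpumask.h>

    /* Hypothetical callback: must be fast and non-blocking, it runs
     * with interrupts disabled on every selected CPU. */
    static void demo_ping(void *info)
    {
            pr_info("demo_ping on CPU %d\n", smp_processor_id());
    }

    static void demo_ping_mask(const struct cpumask *mask)
    {
            /* Runs demo_ping() on each online CPU in @mask, including
             * the local CPU if it is set, and waits for completion.
             * Must not be called with interrupts disabled or from
             * hard/soft irq context. */
            on_each_cpu_mask(mask, demo_ping, NULL, true);
    }
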
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 3b555b4..28edba7 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -20,6 +20,7 @@ extern void __lockfunc rt_spin_lock(spinlock_t *lock);
 extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
 extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
 extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock);
 extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
 extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
 extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 063c2bb..031c644 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -546,6 +546,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
                err = -EBUSY;
                goto restore_cpus;
        }
+       migrate_enable();
 
        cpu_hotplug_begin();
        err = cpu_unplug_begin(cpu);
@@ -598,7 +599,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 out_release:
        cpu_unplug_done(cpu);
 out_cancel:
-       migrate_enable();
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 6075f17..d759326 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -801,10 +801,8 @@ static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
 /*
  * Slow path to release a rt_mutex spin_lock style
  */
-static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock)
 {
-       raw_spin_lock(&lock->wait_lock);
-
        debug_rt_mutex_unlock(lock);
 
        rt_mutex_deadlock_account_unlock(current);
@@ -823,6 +821,23 @@ static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
        rt_mutex_adjust_prio(current);
 }
 
+static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+       raw_spin_lock(&lock->wait_lock);
+       __rt_spin_lock_slowunlock(lock);
+}
+
+static void  noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock)
+{
+       int ret;
+
+       do {
+               ret = raw_spin_trylock(&lock->wait_lock);
+       } while (!ret);
+
+       __rt_spin_lock_slowunlock(lock);
+}
+
 void __lockfunc rt_spin_lock(spinlock_t *lock)
 {
        rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
@@ -853,6 +868,13 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock)
 }
 EXPORT_SYMBOL(rt_spin_unlock);
 
+void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock)
+{
+       /* NOTE: we always pass in '1' for nested, for simplicity */
+       spin_release(&lock->dep_map, 1, _RET_IP_);
+       rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq);
+}
+
 void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
 {
        rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
@@ -1064,7 +1086,8 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
        int ret = 0;
 
-       raw_spin_lock(&lock->wait_lock);
+       if (!raw_spin_trylock(&lock->wait_lock))
+               return ret;
        init_lists(lock);
 
        if (likely(rt_mutex_owner(lock) != current)) {
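
The new rt_spin_unlock_after_trylock_in_irq() above pairs with a trylock
taken from hard-irq context; its only user in this release is
get_next_timer_interrupt() (see the kernel/timer.c hunk further down). A
condensed, hypothetical sketch of the pairing, assuming a PREEMPT_RT_FULL
build where spinlock_t is rtmutex-based; demo_lock stands in for the timer
base lock.

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);      /* hypothetical lock */

    static void demo_poll_from_hardirq(void)
    {
            /* rt_spin_trylock() never blocks, so it is usable from
             * hard-irq context even on PREEMPT_RT_FULL. */
            if (rt_spin_trylock(&demo_lock)) {
                    /* ... short, non-sleeping critical section ... */

                    /* The matching unlock must not sleep on the rtmutex
                     * wait_lock either; this variant spins on
                     * raw_spin_trylock() instead (see
                     * rt_spin_lock_slowunlock_hirq() above). */
                    rt_spin_unlock_after_trylock_in_irq(&demo_lock);
            }
    }
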
diff --git a/kernel/smp.c b/kernel/smp.c
index 9e800b2..d5f3238 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -712,3 +712,93 @@ int on_each_cpu(void (*func) (void *info), void *info, int wait)
        return ret;
 }
 EXPORT_SYMBOL(on_each_cpu);
+
+/**
+ * on_each_cpu_mask(): Run a function on processors specified by
+ * cpumask, which may include the local processor.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or
+ * from a hardware interrupt handler or from a bottom half handler.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+                       void *info, bool wait)
+{
+       int cpu = get_cpu();
+
+       smp_call_function_many(mask, func, info, wait);
+       if (cpumask_test_cpu(cpu, mask)) {
+               local_irq_disable();
+               func(info);
+               local_irq_enable();
+       }
+       put_cpu();
+}
+EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * on_each_cpu_cond(): Call a function on each processor for which
+ * the supplied function cond_func returns true, optionally waiting
+ * for all the required CPUs to finish. This may include the local
+ * processor.
+ * @cond_func: A callback function that is passed a cpu id and
+ *             the info parameter. The function is called
+ *             with preemption disabled. The function should
+ *             return a boolean value indicating whether to IPI
+ *             the specified CPU.
+ * @func:      The function to run on all applicable CPUs.
+ *             This must be fast and non-blocking.
+ * @info:      An arbitrary pointer to pass to both functions.
+ * @wait:      If true, wait (atomically) until function has
+ *             completed on other CPUs.
+ * @gfp_flags: GFP flags to use when allocating the cpumask
+ *             used internally by the function.
+ *
+ * The function might sleep if the GFP flags indicate a non-atomic
+ * allocation is allowed.
+ *
+ * Preemption is disabled to protect against CPUs going offline but not online.
+ * CPUs going online during the call will not be seen or sent an IPI.
+ *
+ * You must not call this function with disabled interrupts or
+ * from a hardware interrupt handler or from a bottom half handler.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+                       smp_call_func_t func, void *info, bool wait,
+                       gfp_t gfp_flags)
+{
+       cpumask_var_t cpus;
+       int cpu, ret;
+
+       might_sleep_if(gfp_flags & __GFP_WAIT);
+
+       if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
+               preempt_disable();
+               for_each_online_cpu(cpu)
+                       if (cond_func(cpu, info))
+                               cpumask_set_cpu(cpu, cpus);
+               on_each_cpu_mask(cpus, func, info, wait);
+               preempt_enable();
+               free_cpumask_var(cpus);
+       } else {
+               /*
+                * No free cpumask, bother. No matter, we'll
+                * just have to IPI them one by one.
+                */
+               preempt_disable();
+               for_each_online_cpu(cpu)
+                       if (cond_func(cpu, info)) {
+                               ret = smp_call_function_single(cpu, func,
+                                                               info, wait);
+                               WARN_ON_ONCE(ret);
+                       }
+               preempt_enable();
+       }
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
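
Again for illustration only (not part of the patch): a minimal sketch of
the new on_each_cpu_cond() API in the style of the fs/buffer.c change
above; demo_pending and the demo_* helpers are hypothetical.

    #include <linux/smp.h>
    #include <linux/percpu.h>
    #include <linux/gfp.h>

    static DEFINE_PER_CPU(int, demo_pending);

    /* Called with preemption disabled; decides whether @cpu gets an
     * IPI at all. */
    static bool demo_cpu_has_work(int cpu, void *info)
    {
            return per_cpu(demo_pending, cpu) != 0;
    }

    /* Runs on each selected CPU with interrupts disabled. */
    static void demo_drain(void *info)
    {
            __this_cpu_write(demo_pending, 0);
    }

    static void demo_drain_all(void)
    {
            /* May sleep while allocating the internal cpumask. */
            on_each_cpu_cond(demo_cpu_has_work, demo_drain, NULL, true,
                             GFP_KERNEL);
    }
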
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ca00a68..7d882cc 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -297,6 +297,44 @@ EXPORT_SYMBOL(local_bh_enable_ip);
  */
 #define MAX_SOFTIRQ_RESTART 10
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * Convoluted means of passing __do_softirq() a message through the various
+ * architecture execute_on_stack() bits.
+ *
+ * When we run softirqs from irq_exit() and thus on the hardirq stack we need
+ * to keep the lockdep irq context tracking as tight as possible in order to
+ * not mis-qualify lock contexts and miss possible deadlocks.
+ */
+static DEFINE_PER_CPU(int, softirq_from_hardirq);
+
+static inline void lockdep_softirq_from_hardirq(void)
+{
+       this_cpu_write(softirq_from_hardirq, 1);
+}
+
+static inline void lockdep_softirq_start(void)
+{
+       if (this_cpu_read(softirq_from_hardirq))
+               trace_hardirq_exit();
+       lockdep_softirq_enter();
+}
+
+static inline void lockdep_softirq_end(void)
+{
+       lockdep_softirq_exit();
+       if (this_cpu_read(softirq_from_hardirq)) {
+               this_cpu_write(softirq_from_hardirq, 0);
+               trace_hardirq_enter();
+       }
+}
+
+#else
+static inline void lockdep_softirq_from_hardirq(void) { }
+static inline void lockdep_softirq_start(void) { }
+static inline void lockdep_softirq_end(void) { }
+#endif
+
 asmlinkage void __do_softirq(void)
 {
        __u32 pending;
@@ -308,7 +346,7 @@ asmlinkage void __do_softirq(void)
 
        __local_bh_disable((unsigned long)__builtin_return_address(0),
                           SOFTIRQ_OFFSET);
-       lockdep_softirq_enter();
+       lockdep_softirq_start();
 
        cpu = smp_processor_id();
 restart:
@@ -324,7 +362,7 @@ restart:
        if (pending)
                wakeup_softirqd();
 
-       lockdep_softirq_exit();
+       lockdep_softirq_end();
 
        account_system_vtime(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
@@ -582,9 +620,10 @@ void irq_enter(void)
 static inline void invoke_softirq(void)
 {
 #ifndef CONFIG_PREEMPT_RT_FULL
-       if (!force_irqthreads)
+       if (!force_irqthreads) {
+               lockdep_softirq_from_hardirq();
                __do_softirq();
-       else {
+       } else {
                __local_bh_disable((unsigned long)__builtin_return_address(0),
                                SOFTIRQ_OFFSET);
                wakeup_softirqd();
@@ -618,7 +657,6 @@ static inline void invoke_softirq(void)
 void irq_exit(void)
 {
        account_system_vtime(current);
-       trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();
@@ -629,6 +667,7 @@ void irq_exit(void)
        if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
                tick_nohz_stop_sched_tick(0);
 #endif
+       trace_hardirq_exit(); /* must be last! */
        __preempt_enable_no_resched();
 }
 
diff --git a/kernel/timer.c b/kernel/timer.c
index 7fa30e0..b7ef082 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1336,7 +1336,7 @@ unsigned long get_next_timer_interrupt(unsigned long now)
                if (time_before_eq(base->next_timer, base->timer_jiffies))
                        base->next_timer = __next_timer_interrupt(base);
                expires = base->next_timer;
-               rt_spin_unlock(&base->lock);
+               rt_spin_unlock_after_trylock_in_irq(&base->lock);
        } else {
                expires = now + 1;
        }
diff --git a/localversion-rt b/localversion-rt
index 54e7da6..c1f2720 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt75
+-rt76
--