This patch adds several trace markers around the semaphore primitives.
Together with a tracing application, these markers can be used to
measure kernel semaphore usage and contention.
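
For example, a small module can hook these markers at run time.  The
sketch below is illustrative only (it is not part of this patch) and
assumes the marker_probe_register()/marker_probe_unregister() interface
described in Documentation/markers.txt; the names probe_sem_down,
sem_probe_init/sem_probe_exit and the printk output are made up for the
example:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/marker.h>

/* Runs each time the sem_down marker fires; *args holds the "%p". */
static void probe_sem_down(void *probe_data, void *call_data,
                           const char *format, va_list *args)
{
        void *sem = va_arg(*args, void *);

        printk(KERN_DEBUG "sem_down: semaphore %p\n", sem);
}

static int __init sem_probe_init(void)
{
        /* Arms the sem_down marker site added by this patch. */
        return marker_probe_register("sem_down", "%p",
                                     probe_sem_down, NULL);
}

static void __exit sem_probe_exit(void)
{
        marker_probe_unregister("sem_down", probe_sem_down, NULL);
}

module_init(sem_probe_init);
module_exit(sem_probe_exit);
MODULE_LICENSE("GPL");

While no probe is registered, a marker site reduces to a load and a
predicted branch, so the instrumentation cost is negligible.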

Signed-off-by: Mike Mason <[EMAIL PROTECTED]>
Signed-off-by: David Wilder <[EMAIL PROTECTED]>
---
 lib/semaphore-sleepers.c |    9 +++++++++
 1 files changed, 9 insertions(+), 0 deletions(-)

diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
index 1281805..5343a96 100644
--- a/lib/semaphore-sleepers.c
+++ b/lib/semaphore-sleepers.c
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/marker.h>
 #include <asm/semaphore.h>
 
 /*
@@ -50,6 +51,7 @@
 
 fastcall void __up(struct semaphore *sem)
 {
+       trace_mark(sem_up, "%p", sem);
        wake_up(&sem->wait);
 }
 
@@ -59,6 +61,7 @@ fastcall void __sched __down(struct semaphore * sem)
        DECLARE_WAITQUEUE(wait, tsk);
        unsigned long flags;
 
+       trace_mark(sem_down, "%p", sem);
        tsk->state = TASK_UNINTERRUPTIBLE;
        spin_lock_irqsave(&sem->wait.lock, flags);
        add_wait_queue_exclusive_locked(&sem->wait, &wait);
@@ -73,12 +76,14 @@ fastcall void __sched __down(struct semaphore * sem)
                 * the wait_queue_head.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+                       trace_mark(sem_down_resume, "%p", sem);
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irqrestore(&sem->wait.lock, flags);
 
+               trace_mark(sem_down_sched, "%p", sem);
                schedule();
 
                spin_lock_irqsave(&sem->wait.lock, flags);
@@ -97,6 +102,7 @@ fastcall int __sched __down_interruptible(struct semaphore * sem)
        DECLARE_WAITQUEUE(wait, tsk);
        unsigned long flags;
 
+       trace_mark(sem_down_intr, "%p", sem);
        tsk->state = TASK_INTERRUPTIBLE;
        spin_lock_irqsave(&sem->wait.lock, flags);
        add_wait_queue_exclusive_locked(&sem->wait, &wait);
@@ -113,6 +119,7 @@ fastcall int __sched __down_interruptible(struct semaphore * sem)
                 * and exit.
                 */
                if (signal_pending(current)) {
+                       trace_mark(sem_down_intr_fail, "%p", sem);
                        retval = -EINTR;
                        sem->sleepers = 0;
                        atomic_add(sleepers, &sem->count);
@@ -126,12 +133,14 @@ fastcall int __sched __down_interruptible(struct semaphore * sem)
                 * still hoping to get the semaphore.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+                       trace_mark(sem_down_intr_resume, "%p", sem);
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irqrestore(&sem->wait.lock, flags);
 
+               trace_mark(sem_down_intr_sched, "%p", sem);
                schedule();
 
                spin_lock_irqsave(&sem->wait.lock, flags);

