Saving the comm of a task in every trace entry is very expensive.
Instead, this patch adds a hook into the context switch path that
records the command lines of the last 128 tasks in a small table.
The table is consulted when a trace is printed.

Note: a comm may be lost from the table if enough other tasks are
traced before the trace is printed; such entries are shown as <...>.
Later (TBD) patches may simply store this information in the trace
itself.

Signed-off-by: Steven Rostedt <[EMAIL PROTECTED]>
---
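
For reference, the cmdline table added below is a small two-way map:
a pool of SAVED_CMDLINES comm slots recycled round-robin, a
pid -> slot map for lookup at print time, and a slot -> pid map so
that recycling a slot can invalidate the stale pid entry. Here is a
minimal userspace sketch of that idea (not part of the patch;
save_cmdline()/find_cmdline()/main() are illustrative stand-ins, and
the trylock, notrace annotations and atomics are omitted):

#include <stdio.h>
#include <string.h>

#define PID_MAX_DEFAULT	32768
#define SAVED_CMDLINES	128
#define TASK_COMM_LEN	16

static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT + 1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;

static void save_cmdline(unsigned pid, const char *comm)
{
	unsigned idx, old;

	if (!pid || pid > PID_MAX_DEFAULT)
		return;

	idx = map_pid_to_cmdline[pid];
	if (idx >= SAVED_CMDLINES) {
		/* unmapped pid: claim the next slot round-robin */
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/* unmap whichever pid owned this slot before */
		old = map_cmdline_to_pid[idx];
		if (old <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[old] = (unsigned)-1;

		map_pid_to_cmdline[pid] = idx;
		map_cmdline_to_pid[idx] = pid;
		cmdline_idx = idx;
	}
	strncpy(saved_cmdlines[idx], comm, TASK_COMM_LEN - 1);
}

static const char *find_cmdline(unsigned pid)
{
	if (!pid)
		return "<idle>";
	if (pid > PID_MAX_DEFAULT || map_pid_to_cmdline[pid] >= SAVED_CMDLINES)
		return "<...>";	/* never recorded, or slot recycled */
	return saved_cmdlines[map_pid_to_cmdline[pid]];
}

int main(void)
{
	memset(map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));

	save_cmdline(4271, "bash");
	printf("%u -> %s\n", 4271u, find_cmdline(4271));	/* bash  */
	printf("%u -> %s\n", 9999u, find_cmdline(9999));	/* <...> */
	return 0;
}
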
 lib/tracing/Kconfig              |    1 
 lib/tracing/trace_function.c     |    2 
 lib/tracing/trace_sched_switch.c |   11 +++-
 lib/tracing/tracer.c             |  112 +++++++++++++++++++++++++++++++++++++--
 lib/tracing/tracer.h             |    5 +
 5 files changed, 123 insertions(+), 8 deletions(-)

Index: linux-mcount.git/lib/tracing/Kconfig
===================================================================
--- linux-mcount.git.orig/lib/tracing/Kconfig   2008-01-21 10:03:57.000000000 -0500
+++ linux-mcount.git/lib/tracing/Kconfig        2008-01-21 10:04:54.000000000 -0500
@@ -19,6 +19,7 @@ config FUNCTION_TRACER
        default n
        select MCOUNT
        select TRACING
+       select CONTEXT_SWITCH_TRACER
        help
          Use profiler instrumentation, adding -pg to CFLAGS. This will
          insert a call to an architecture specific __mcount routine,
Index: linux-mcount.git/lib/tracing/trace_function.c
===================================================================
--- linux-mcount.git.orig/lib/tracing/trace_function.c  2008-01-21 09:59:15.000000000 -0500
+++ linux-mcount.git/lib/tracing/trace_function.c       2008-01-21 10:04:54.000000000 -0500
@@ -80,9 +80,11 @@ static void function_trace_ctrl_update(s
        if (tr->ctrl ^ val) {
                if (val) {
                        trace_enabled = 1;
+                       atomic_inc(&trace_record_cmdline);
                        register_mcount_function(&trace_ops);
                } else {
                        trace_enabled = 0;
+                       atomic_dec(&trace_record_cmdline);
                        unregister_mcount_function(&trace_ops);
                }
                tr->ctrl = val;
Index: linux-mcount.git/lib/tracing/trace_sched_switch.c
===================================================================
--- linux-mcount.git.orig/lib/tracing/trace_sched_switch.c      2008-01-21 10:03:57.000000000 -0500
+++ linux-mcount.git/lib/tracing/trace_sched_switch.c   2008-01-21 10:04:54.000000000 -0500
@@ -41,6 +41,9 @@ static notrace void sched_switch_callbac
        va_list ap;
        int cpu;
 
+       if (unlikely(atomic_read(&trace_record_cmdline)))
+               tracing_record_cmdline(current);
+
        if (likely(!trace_enabled))
                return;
 
@@ -82,10 +85,14 @@ static void sched_switch_trace_ctrl_upda
                sched_switch_reset(tr);
 
        if (tr->ctrl ^ val) {
-               if (val)
+               if (val) {
+                       atomic_inc(&trace_record_cmdline);
                        trace_enabled = 1;
-               else
+               } else {
                        trace_enabled = 0;
+                       atomic_dec(&trace_record_cmdline);
+               }
+
                tr->ctrl = val;
        }
 }
Index: linux-mcount.git/lib/tracing/tracer.c
===================================================================
--- linux-mcount.git.orig/lib/tracing/tracer.c  2008-01-21 10:03:57.000000000 -0500
+++ linux-mcount.git/lib/tracing/tracer.c       2008-01-21 10:04:54.000000000 -0500
@@ -49,6 +49,89 @@ void notrace tracing_reset(struct tracin
        atomic_set(&data->underrun, 0);
 }
 
+#define SAVED_CMDLINES 128
+static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
+static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
+static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
+static int cmdline_idx;
+static DEFINE_SPINLOCK(trace_cmdline_lock);
+atomic_t trace_record_cmdline;
+atomic_t trace_record_cmdline_disabled;
+
+static void trace_init_cmdlines(void)
+{
+       memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
+       memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
+       cmdline_idx = 0;
+}
+
+notrace void trace_stop_cmdline_recording(void);
+
+static void notrace trace_save_cmdline(struct task_struct *tsk)
+{
+       unsigned map;
+       unsigned idx;
+
+       if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
+               return;
+
+       /*
+        * It's not the end of the world if we don't get
+        * the lock, but we also don't want to spin
+        * nor do we want to disable interrupts,
+        * so if we miss here, then better luck next time.
+        */
+       if (!spin_trylock(&trace_cmdline_lock))
+               return;
+
+       idx = map_pid_to_cmdline[tsk->pid];
+       if (idx >= SAVED_CMDLINES) {
+               idx = (cmdline_idx + 1) % SAVED_CMDLINES;
+
+               map = map_cmdline_to_pid[idx];
+               if (map <= PID_MAX_DEFAULT)
+                       map_pid_to_cmdline[map] = (unsigned)-1;
+
+               map_pid_to_cmdline[tsk->pid] = idx;
+       map_cmdline_to_pid[idx] = tsk->pid;
+
+               cmdline_idx = idx;
+       }
+
+       memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
+
+       spin_unlock(&trace_cmdline_lock);
+}
+
+static notrace char *trace_find_cmdline(int pid)
+{
+       char *cmdline = "<...>";
+       unsigned map;
+
+       if (!pid)
+               return "<idle>";
+
+       if (pid > PID_MAX_DEFAULT)
+               goto out;
+
+       map = map_pid_to_cmdline[pid];
+       if (map >= SAVED_CMDLINES)
+               goto out;
+
+       cmdline = saved_cmdlines[map];
+
+ out:
+       return cmdline;
+}
+
+void tracing_record_cmdline(struct task_struct *tsk)
+{
+       if (atomic_read(&trace_record_cmdline_disabled))
+               return;
+
+       trace_save_cmdline(tsk);
+}
+
 static inline notrace struct tracing_entry *
 tracing_get_trace_entry(struct tracing_trace *tr,
                        struct tracing_trace_cpu *data)
@@ -90,7 +173,6 @@ tracing_generic_entry_update(struct trac
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
-       memcpy(entry->comm, tsk->comm, TASK_COMM_LEN);
 }
 
 notrace void tracing_function_trace(struct tracing_trace *tr,
@@ -242,6 +324,8 @@ static void *s_start(struct seq_file *m,
        loff_t l = 0;
        int i;
 
+       atomic_inc(&trace_record_cmdline_disabled);
+
        /* let the tracer grab locks here if needed */
        if (iter->tr->start)
                iter->tr->start(iter);
@@ -269,6 +353,8 @@ static void s_stop(struct seq_file *m, v
 {
        struct tracing_iterator *iter = m->private;
 
+       atomic_dec(&trace_record_cmdline_disabled);
+
        /* let the tracer release locks here if needed */
        if (iter->tr->stop)
                iter->tr->stop(iter);
@@ -390,8 +476,11 @@ static void notrace
 lat_print_generic(struct seq_file *m, struct tracing_entry *entry, int cpu)
 {
        int hardirq, softirq;
+       char *comm;
 
-       seq_printf(m, "%8.8s-%-5d ", entry->comm, entry->pid);
+       comm = trace_find_cmdline(entry->pid);
+
+       seq_printf(m, "%8.8s-%-5d ", comm, entry->pid);
        seq_printf(m, "%d", cpu);
        seq_printf(m, "%c%c",
                   (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
@@ -453,9 +542,12 @@ print_lat_fmt(struct seq_file *m, struct
        abs_usecs = cycles_to_usecs(entry->t - iter->tr->time_start);
 
        if (verbose) {
+               char *comm;
+
+               comm = trace_find_cmdline(entry->pid);
                seq_printf(m, "%16s %5d %d %d %08x %08x [%08lx]"
                           " %ld.%03ldms (+%ld.%03ldms): ",
-                          entry->comm,
+                          comm,
                           entry->pid, cpu, entry->flags,
                           entry->preempt_count, trace_idx,
                           cycles_to_usecs(entry->t),
@@ -492,6 +584,9 @@ static void notrace print_trace_fmt(stru
        unsigned long secs;
        int sym_only = !!(trace_flags & TRACE_ITER_SYM_ONLY);
        unsigned long long t;
+       char *comm;
+
+       comm = trace_find_cmdline(iter->ent->pid);
 
        t = cycles_to_usecs(iter->ent->t);
        usec_rem = do_div(t, 1000000ULL);
@@ -499,7 +594,7 @@ static void notrace print_trace_fmt(stru
 
        seq_printf(m, "[%5lu.%06lu] ", secs, usec_rem);
        seq_printf(m, "CPU %d: ", iter->cpu);
-       seq_printf(m, "%s:%d ", iter->ent->comm,
+       seq_printf(m, "%s:%d ", comm,
                   iter->ent->pid);
        switch (iter->ent->type) {
        case TRACE_FN:
@@ -813,6 +908,13 @@ static __init int trace_init_debugfs(voi
        return 0;
 }
 
-device_initcall(trace_init_debugfs);
+static __init int trace_init(void)
+{
+       trace_init_cmdlines();
+
+       return trace_init_debugfs();
+}
+
+device_initcall(trace_init);
 
 #endif /* CONFIG_DEBUG_FS */
Index: linux-mcount.git/lib/tracing/tracer.h
===================================================================
--- linux-mcount.git.orig/lib/tracing/tracer.h  2008-01-21 10:03:57.000000000 -0500
+++ linux-mcount.git/lib/tracing/tracer.h       2008-01-21 10:04:54.000000000 -0500
@@ -25,7 +25,6 @@ struct tracing_entry {
        char preempt_count; /* assumes PREEMPT_MASK is 8 bits or less */
        int pid;
        cycle_t t;
-       char comm[TASK_COMM_LEN];
        union {
                struct tracing_function fn;
                struct tracing_sched_switch ctx;
@@ -91,6 +90,10 @@ void tracing_sched_switch_trace(struct t
                                struct task_struct *prev,
                                struct task_struct *next,
                                unsigned long flags);
+void tracing_record_cmdline(struct task_struct *tsk);
+
+extern atomic_t trace_record_cmdline;
+extern atomic_t trace_record_cmdline_disabled;
 
 extern struct file_operations tracing_fops;
 extern struct file_operations tracing_lt_fops;
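
A note on the two counters above: trace_record_cmdline is a reference
count rather than a flag, because recording has more than one user.
The function tracer and the sched switch tracer each take a reference
when enabled, and recording must stay on until the last user goes
away; trace_record_cmdline_disabled is bumped around s_start()/s_stop()
so the table is not updated while a trace is being read out. An
illustrative sketch of the refcount behaviour (not part of the patch;
plain ints and printf stand in for the atomics):

#include <stdio.h>

static int record_cmdline;	/* stands in for atomic_t trace_record_cmdline */

static void tracer_ctrl(const char *tracer, int on)
{
	if (on)
		record_cmdline++;	/* atomic_inc() in *_ctrl_update() */
	else
		record_cmdline--;	/* atomic_dec() in *_ctrl_update() */
	printf("%-12s %-3s -> recording %s\n",
	       tracer, on ? "on" : "off", record_cmdline ? "on" : "off");
}

int main(void)
{
	tracer_ctrl("ftrace", 1);	/* recording on */
	tracer_ctrl("sched_switch", 1);	/* still on */
	tracer_ctrl("ftrace", 0);	/* still on: one user remains */
	tracer_ctrl("sched_switch", 0);	/* off */
	return 0;
}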
