From: Soumya PN <[email protected]>

In ftrace.c, the function alloc_retstack_tasklist() (which is invoked when
function_graph tracing is on) takes the tasklist_lock as a reader with
interrupts disabled while iterating through the list of threads. The
tasklist_lock is never write-locked in interrupt context, so it is safe to
leave interrupts enabled for the duration of the read_lock in this block,
which can be significant given that the block of code iterates through all
threads. Hence, change the code to call read_lock() and read_unlock()
instead of read_lock_irqsave() and read_unlock_irqrestore().
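
For illustration only (not part of this patch), below is a minimal sketch of
the two locking patterns; example_lock and the walker functions are
hypothetical stand-ins, not names from the kernel source:

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(example_lock);	/* hypothetical lock */

	/*
	 * Needed only if the lock can also be write-locked from interrupt
	 * context: otherwise an IRQ arriving on this CPU could spin forever
	 * in write_lock() while the read lock is still held here.
	 */
	static void walk_with_irqs_disabled(void)
	{
		unsigned long flags;

		read_lock_irqsave(&example_lock, flags);
		/* ... iterate over the protected list ... */
		read_unlock_irqrestore(&example_lock, flags);
	}

	/*
	 * Sufficient when, as with tasklist_lock, the lock is never
	 * write-locked in interrupt context: interrupts stay enabled for
	 * the whole (potentially long) walk.
	 */
	static void walk_with_irqs_enabled(void)
	{
		read_lock(&example_lock);
		/* ... iterate over the protected list ... */
		read_unlock(&example_lock);
	}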

Similar changes were made in commits 8063e41d2ffc ("tracing: Change
syscall_*regfunc() to check PF_KTHREAD and use for_each_process_thread()")
and 3472eaa1f12e ("sched: normalize_rt_tasks(): Don't use _irqsave for
tasklist_lock, use task_rq_lock()").

Link: http://lkml.kernel.org/r/[email protected]

Signed-off-by: Soumya PN <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
---
 kernel/trace/ftrace.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b1870fbd2b67..a6804823a058 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5713,7 +5713,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 {
        int i;
        int ret = 0;
-       unsigned long flags;
        int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
        struct task_struct *g, *t;
 
@@ -5729,7 +5728,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
                }
        }
 
-       read_lock_irqsave(&tasklist_lock, flags);
+       read_lock(&tasklist_lock);
        do_each_thread(g, t) {
                if (start == end) {
                        ret = -EAGAIN;
@@ -5747,7 +5746,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
        } while_each_thread(g, t);
 
 unlock:
-       read_unlock_irqrestore(&tasklist_lock, flags);
+       read_unlock(&tasklist_lock);
 free:
        for (i = start; i < end; i++)
                kfree(ret_stack_list[i]);
-- 
2.8.0.rc3

