check_hung_uninterruptible_tasks() stops after rcu_lock_break() if either "t" or "g" exits; this is suboptimal.
If "t" is alive, we can always continue; t->group_leader can be used as the new "g". We do not even bother to check g != NULL in this case. If "g" is alive, we can at least continue the outer for_each_process() loop. Signed-off-by: Oleg Nesterov <oleg@redhat.com> --- kernel/hung_task.c | 29 ++++++++++++++++++++--------- 1 files changed, 20 insertions(+), 9 deletions(-) diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 4735b99..f488059 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -134,20 +134,26 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) * For preemptible RCU it is sufficient to call rcu_read_unlock in order * to exit the grace period. For classic RCU, a reschedule is required. */ -static bool rcu_lock_break(struct task_struct *g, struct task_struct *t) +static void rcu_lock_break(struct task_struct **g, struct task_struct **t) { - bool can_cont; + bool alive; + + get_task_struct(*g); + get_task_struct(*t); - get_task_struct(g); - get_task_struct(t); rcu_read_unlock(); cond_resched(); rcu_read_lock(); - can_cont = pid_alive(g) && pid_alive(t); - put_task_struct(t); - put_task_struct(g); - return can_cont; + alive = pid_alive(*g); + put_task_struct(*g); + if (!alive) + *g = NULL; + + alive = pid_alive(*t); + put_task_struct(*t); + if (!alive) + *t = NULL; } /* @@ -178,7 +184,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) if (!--batch_count) { batch_count = HUNG_TASK_BATCHING; - if (!rcu_lock_break(g, t)) + rcu_lock_break(&g, &t); + if (t) /* in case g == NULL */ + g = t->group_leader; + else if (g) /* continue the outer loop */ + break; + else /* both dead */ goto unlock; } /* use "==" to skip the TASK_KILLABLE tasks */ -- 1.5.5.1 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/