On Thu, Jun 28, 2018 at 06:29:41PM +0200, Frederic Weisbecker wrote:
> ---
>  kernel/sched/core.c | 18 ++++++++++--------
>  1 file changed, 10 insertions(+), 8 deletions(-)
> 
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 78d8fac..da8f121 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -3127,16 +3127,18 @@ static void sched_tick_remote(struct work_struct *work)
>               u64 delta;
>  
>               rq_lock_irq(rq, &rf);
> -             update_rq_clock(rq);
>               curr = rq->curr;
> -             delta = rq_clock_task(rq) - curr->se.exec_start;
> +             if (!is_idle_task(curr)) {
> +                     update_rq_clock(rq);
> +                     delta = rq_clock_task(rq) - curr->se.exec_start;
>  
> -             /*
> -              * Make sure the next tick runs within a reasonable
> -              * amount of time.
> -              */
> -             WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
> -             curr->sched_class->task_tick(rq, curr, 0);
> +                     /*
> +                      * Make sure the next tick runs within a reasonable
> +                      * amount of time.
> +                      */
> +                     WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
> +                     curr->sched_class->task_tick(rq, curr, 0);
> +             }
>               rq_unlock_irq(rq, &rf);
>       }

Instead of the deep indent, how about we write it like so?

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3113,7 +3113,9 @@ static void sched_tick_remote(struct wor
        struct tick_work *twork = container_of(dwork, struct tick_work, work);
        int cpu = twork->cpu;
        struct rq *rq = cpu_rq(cpu);
+       struct task_struct *curr;
        struct rq_flags rf;
+       u64 delta;
 
        /*
         * Handle the tick only if it appears the remote CPU is running in full
@@ -3122,26 +3124,28 @@ static void sched_tick_remote(struct wor
         * statistics and checks timeslices in a time-independent way, regardless
         * of when exactly it is running.
         */
-       if (!idle_cpu(cpu) && tick_nohz_tick_stopped_cpu(cpu)) {
-               struct task_struct *curr;
-               u64 delta;
-
-               rq_lock_irq(rq, &rf);
-               curr = rq->curr;
-               if (!is_idle_task(curr)) {
-                       update_rq_clock(rq);
-                       delta = rq_clock_task(rq) - curr->se.exec_start;
-
-                       /*
-                        * Make sure the next tick runs within a reasonable
-                        * amount of time.
-                        */
-                       WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
-                       curr->sched_class->task_tick(rq, curr, 0);
-               }
-               rq_unlock_irq(rq, &rf);
-       }
+       if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
+               goto out_requeue;
 
+       rq_lock_irq(rq, &rf);
+       curr = rq->curr;
+       if (is_idle_task(curr))
+               goto out_unlock;
+
+       update_rq_clock(rq);
+       delta = rq_clock_task(rq) - curr->se.exec_start;
+
+       /*
+        * Make sure the next tick runs within a reasonable
+        * amount of time.
+        */
+       WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+       curr->sched_class->task_tick(rq, curr, 0);
+
+out_unlock:
+       rq_unlock_irq(rq, &rf);
+
+out_requeue:
        /*
         * Run the remote tick once per second (1Hz). This arbitrary
         * frequency is large enough to avoid overload but short enough

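FWIW, with the above applied the whole function would read something like
this. This is reconstructed from the hunks quoted in this thread; the
to_delayed_work() setup at the top and the queue_delayed_work() requeue at
the bottom come from surrounding context that isn't quoted here, so treat
those lines as my assumption about the rest of the function:

static void sched_tick_remote(struct work_struct *work)
{
	/* Assumed from unquoted context: recover the delayed_work. */
	struct delayed_work *dwork = to_delayed_work(work);
	struct tick_work *twork = container_of(dwork, struct tick_work, work);
	int cpu = twork->cpu;
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr;
	struct rq_flags rf;
	u64 delta;

	/*
	 * Only handle the tick if the remote CPU appears to run in full
	 * dynticks mode; a missed or spurious tick is harmless here (see
	 * the original comment for the full rationale).
	 */
	if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
		goto out_requeue;

	rq_lock_irq(rq, &rf);
	curr = rq->curr;

	/* Nothing to account against the idle task. */
	if (is_idle_task(curr))
		goto out_unlock;

	update_rq_clock(rq);
	delta = rq_clock_task(rq) - curr->se.exec_start;

	/*
	 * Make sure the next tick runs within a reasonable
	 * amount of time.
	 */
	WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
	curr->sched_class->task_tick(rq, curr, 0);

out_unlock:
	rq_unlock_irq(rq, &rf);

out_requeue:
	/* Assumed from unquoted context: re-arm the 1Hz remote tick. */
	queue_delayed_work(system_unbound_wq, dwork, HZ);
}

The early goto exits keep the common path at a single indent level, which
is the usual kernel idiom for this kind of bail-out flow.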