On Thu, Aug 22, 2019 at 02:28:10PM +0100, Patrick Bellasi wrote:

> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 04fc161e4dbe..fc2dc86a2abe 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -1043,6 +1043,57 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
>               uclamp_rq_dec_id(rq, p, clamp_id);
>  }
>  
> +static inline void
> +uclamp_update_active(struct task_struct *p, unsigned int clamp_id)
> +{
> +     struct rq_flags rf;
> +     struct rq *rq;
> +
> +     /*
> +      * Lock the task and the rq where the task is (or was) queued.
> +      *
> +      * We might lock the (previous) rq of a !RUNNABLE task, but that's the
> +      * price to pay to safely serialize util_{min,max} updates with
> +      * enqueues, dequeues and migration operations.
> +      * This is the same locking schema used by __set_cpus_allowed_ptr().
> +      */
> +     rq = task_rq_lock(p, &rf);

Since modifying cgroup parameters is privileged-only, this should be OK
I suppose; a privileged user can already DoS the system anyway.

> +     /*
> +      * Setting the clamp bucket is serialized by task_rq_lock().
> +      * If the task is not yet RUNNABLE and its task_struct is not
> +      * affecting a valid clamp bucket, the next time it's enqueued,
> +      * it will already see the updated clamp bucket value.
> +      */
> +     if (!p->uclamp[clamp_id].active)
> +             goto done;
> +
> +     uclamp_rq_dec_id(rq, p, clamp_id);
> +     uclamp_rq_inc_id(rq, p, clamp_id);
> +
> +done:

I'm thinking that:

        if (p->uclamp[clamp_id].active) {
                uclamp_rq_dec_id(rq, p, clamp_id);
                uclamp_rq_inc_id(rq, p, clamp_id);
        }

was too obvious? ;-)
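
To spell it out, the whole helper with that fold-in would read something
like this (a sketch against the quoted hunk, untested; uclamp_rq_dec_id()
and uclamp_rq_inc_id() are the helpers from the patch):

	static inline void
	uclamp_update_active(struct task_struct *p, unsigned int clamp_id)
	{
		struct rq_flags rf;
		struct rq *rq;

		/*
		 * Lock the task and the rq where the task is (or was)
		 * queued; same locking schema as __set_cpus_allowed_ptr().
		 */
		rq = task_rq_lock(p, &rf);

		/*
		 * If the task is not RUNNABLE and thus not affecting a
		 * valid clamp bucket, it will simply see the updated clamp
		 * value on its next enqueue; otherwise refresh the bucket
		 * it currently occupies.
		 */
		if (p->uclamp[clamp_id].active) {
			uclamp_rq_dec_id(rq, p, clamp_id);
			uclamp_rq_inc_id(rq, p, clamp_id);
		}

		task_rq_unlock(rq, p, &rf);
	}

That also gets rid of the otherwise-empty done: label.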

> +
> +     task_rq_unlock(rq, p, &rf);
> +}
