Sorry, I forgot to Cc Steven.
On Wed, Apr 01, 2015 at 07:18:36AM +0800, Wanpeng Li wrote:
>On Mon, Mar 30, 2015 at 09:41:40AM -0400, Steven Rostedt wrote:
>>On Mon, 30 Mar 2015 07:07:10 +0800
>>Wanpeng Li <wanpeng...@linux.intel.com> wrote:
>>
>>> +static int find_next_push_cpu(struct rq *rq)
>>> +{
>>> +   struct rq *next_rq;
>>> +   int cpu;
>>> +
>>> +   while (1) {
>>> +           cpu = dlo_next_cpu(rq);
>>> +           if (cpu >= nr_cpu_ids)
>>> +                   break;
>>> +           next_rq = cpu_rq(cpu);
>>> +
>>> +           /* Make sure the next rq can push to this rq */
>>> +           if (dl_time_before(next_rq->dl.earliest_dl.next,
>>> +                   rq->dl.earliest_dl.curr))
>>> +                   break;
>>> +   }
>>> +
>>> +   return cpu;
>>> +}
>>> +
>>
>>Is it possible that we don't duplicate the code and that we can find a
>>way to share the code between rt and dl? It's not that trivial to just
>>cut and paste. If a bug is found in one, it most likely wont be ported
>>to the other.
>>
>>The best is if we can share the code here some way. Perhaps have a
>>single IPI that checks both rt and dl?
>
>Peter, Juri, what are your ideas? ;)
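
One rough shape that could avoid the duplication, just as a sketch: move
the IPI chain state and the search loop into a small common core, and
have rt and dl each supply only the two class-specific decisions (which
CPU is overloaded next, and whether that CPU can push to us). All the
names below (struct push_ipi, push_ipi_ops and friends) are made up for
illustration, nothing here exists in the tree:

#define PUSH_IPI_EXECUTING      1
#define PUSH_IPI_RESTART        2

struct push_ipi_ops {
        /* next overloaded CPU for this class after @prev, or nr_cpu_ids */
        int  (*next_overloaded_cpu)(struct rq *rq, int prev);
        /* can @src push one of its queued tasks to @dst? */
        bool (*can_push_to)(struct rq *src, struct rq *dst);
};

struct push_ipi {
        raw_spinlock_t                  lock;
        int                             flags;
        int                             cpu;    /* last CPU the IPI visited */
        struct irq_work                 work;
        const struct push_ipi_ops       *ops;
};

/* Shared replacement for the per-class find_next_push_cpu() */
static int push_ipi_find_next_cpu(struct rq *rq, struct push_ipi *pi)
{
        int cpu = pi->cpu;

        for (;;) {
                cpu = pi->ops->next_overloaded_cpu(rq, cpu);
                if (cpu >= nr_cpu_ids)
                        break;

                /* Make sure the next rq can push to this rq */
                if (pi->ops->can_push_to(cpu_rq(cpu), rq))
                        break;
        }
        pi->cpu = cpu;
        return cpu;
}

/* dl would then only need to provide the comparison, e.g.: */
static bool dl_can_push_to(struct rq *src, struct rq *dst)
{
        return dl_time_before(src->dl.earliest_dl.next,
                              dst->dl.earliest_dl.curr);
}

tell_cpu_to_push()/try_to_push_tasks() would then operate purely on
struct push_ipi, and if we want a single IPI to cover both classes, the
irq_work handler could simply walk both classes' ops on the target CPU.
No idea yet whether the locking works out; this is only meant to show
the direction.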
>
>Regards,
>Wanpeng Li 
>
>>
>>-- Steve
>>
>>> +#define RT_PUSH_IPI_EXECUTING              1
>>> +#define RT_PUSH_IPI_RESTART                2
>>> +
>>> +static void tell_cpu_to_push(struct rq *rq)
>>> +{
>>> +   int cpu;
>>> +
>>> +   if (rq->dl.push_flags & RT_PUSH_IPI_EXECUTING) {
>>> +           raw_spin_lock(&rq->dl.push_lock);
>>> +           /* Make sure it's still executing */
>>> +           if (rq->dl.push_flags & RT_PUSH_IPI_EXECUTING) {
>>> +                   /*
>>> +                    * Tell the IPI to restart the loop as things have
>>> +                    * changed since it started.
>>> +                    */
>>> +                   rq->dl.push_flags |= RT_PUSH_IPI_RESTART;
>>> +                   raw_spin_unlock(&rq->dl.push_lock);
>>> +                   return;
>>> +           }
>>> +           raw_spin_unlock(&rq->dl.push_lock);
>>> +   }
>>> +
>>> +   /* When here, there's no IPI going around */
>>> +
>>> +   rq->dl.push_cpu = rq->cpu;
>>> +   cpu = find_next_push_cpu(rq);
>>> +   if (cpu >= nr_cpu_ids)
>>> +           return;
>>> +
>>> +   rq->dl.push_flags = RT_PUSH_IPI_EXECUTING;
>>> +
>>> +   irq_work_queue_on(&rq->dl.push_work, cpu);
>>> +}
>>> +
>>> +/* Called from hardirq context */
>>> +static void try_to_push_tasks(void *arg)
>>> +{
>>> +   struct dl_rq *dl_rq = arg;
>>> +   struct rq *rq, *src_rq;
>>> +   int this_cpu;
>>> +   int cpu;
>>> +
>>> +   this_cpu = dl_rq->push_cpu;
>>> +
>>> +   /* Paranoid check */
>>> +   BUG_ON(this_cpu != smp_processor_id());
>>> +
>>> +   rq = cpu_rq(this_cpu);
>>> +   src_rq = rq_of_dl_rq(dl_rq);
>>> +
>>> +again:
>>> +   if (has_pushable_dl_tasks(rq)) {
>>> +           raw_spin_lock(&rq->lock);
>>> +           push_dl_task(rq);
>>> +           raw_spin_unlock(&rq->lock);
>>> +   }
>>> +
>>> +   /* Pass the IPI to the next dl overloaded queue */
>>> +   raw_spin_lock(&dl_rq->push_lock);
>>> +   /*
>>> +    * If the source queue changed since the IPI went out,
>>> +    * we need to restart the search from that CPU again.
>>> +    */
>>> +   if (dl_rq->push_flags & RT_PUSH_IPI_RESTART) {
>>> +           dl_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
>>> +           dl_rq->push_cpu = src_rq->cpu;
>>> +   }
>>> +
>>> +   cpu = find_next_push_cpu(src_rq);
>>> +
>>> +   if (cpu >= nr_cpu_ids)
>>> +           dl_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
>>> +   raw_spin_unlock(&dl_rq->push_lock);
>>> +
>>> +   if (cpu >= nr_cpu_ids)
>>> +           return;
>>> +
>>> +   /*
>>> +    * It is possible that a restart caused this CPU to be
>>> +    * chosen again. Don't bother with an IPI, just see if we
>>> +    * have more to push.
>>> +    */
>>> +   if (unlikely(cpu == rq->cpu))
>>> +           goto again;
>>> +
>>> +   /* Try the next DL overloaded CPU */
>>> +   irq_work_queue_on(&dl_rq->push_work, cpu);
>>> +}
>>> +
>>> +static void push_irq_work_func(struct irq_work *work)
>>> +{
>>> +   struct dl_rq *dl_rq = container_of(work, struct dl_rq, push_work);
>>> +
>>> +   try_to_push_tasks(dl_rq);
>>> +}
>>> +#endif /* HAVE_RT_PUSH_IPI */
>>> +
>>>  static int pull_dl_task(struct rq *this_rq)
>>>  {
>>>     int this_cpu = this_rq->cpu, ret = 0, cpu;
>>> @@ -1432,6 +1602,13 @@ static int pull_dl_task(struct rq *this_rq)
>>>      */
>>>     smp_rmb();
>>>  
>>> +#ifdef HAVE_RT_PUSH_IPI
>>> +   if (sched_feat(RT_PUSH_IPI)) {
>>> +           tell_cpu_to_push(this_rq);
>>> +           return 0;
>>> +   }
>>> +#endif
>>> +
>>>     for_each_cpu(cpu, this_rq->rd->dlo_mask) {
>>>             if (this_cpu == cpu)
>>>                     continue;
>>> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
>>> index dd532c5..87a937c 100644
>>> --- a/kernel/sched/sched.h
>>> +++ b/kernel/sched/sched.h
>>> @@ -500,6 +500,12 @@ struct dl_rq {
>>>      */
>>>     struct rb_root pushable_dl_tasks_root;
>>>     struct rb_node *pushable_dl_tasks_leftmost;
>>> +#ifdef HAVE_RT_PUSH_IPI
>>> +   int push_flags;
>>> +   int push_cpu;
>>> +   struct irq_work push_work;
>>> +   raw_spinlock_t push_lock;
>>> +#endif
>>>  #else
>>>     struct dl_bw dl_bw;
>>>  #endif