Hi Florian,

Florian Fainelli <f.faine...@gmail.com> writes:

> On 3/15/19 2:16 PM, Leandro Dorileo wrote:
>> The Time Aware Priority Scheduler is heavily dependent on the link
>> speed: it relies on it to calculate the transmission bytes per cycle,
>> and we can't properly calculate the so-called budget if the device has
>> failed to report the link speed.
>> 
>> In that case we must not dequeue packets based on a wrong budget.
>> This patch makes sure we fail to dequeue in case:
>> 
>> 1) __ethtool_get_link_ksettings() reports an error, or 2) the ethernet
>> driver failed to set the ksettings' speed value (leaving the link speed
>> as SPEED_UNKNOWN).
>> 
>> Additionally, we recalculate the budget whenever the link speed
>> changes.
>> 
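To make the numbers concrete: picos_per_byte is 10^12 ps/s * 8 bits/byte
divided by the link speed in bits/s, i.e. 8000 at 1Gbps and 800 at 10Gbps
(hence the sub-nanosecond comment in the code). An entry with e.g. a 100us
interval then gets a budget of (100000 * 1000) / 8000 = 12500 bytes at 1Gbps;
with SPEED_UNKNOWN there is simply no sane value to plug in, which is why
dequeue has to bail out.
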
>> Fixes: 5a781ccbd19e4 ("tc: Add support for configuring the taprio scheduler")
>> Signed-off-by: Leandro Dorileo <leandro.maciel.dori...@intel.com>
>> ---
>>  net/sched/sch_taprio.c | 90 ++++++++++++++++++++++++++++++++++--------
>>  1 file changed, 74 insertions(+), 16 deletions(-)
>> 
>> diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
>> index 206e4dbed12f..fa237908ba09 100644
>> --- a/net/sched/sch_taprio.c
>> +++ b/net/sched/sch_taprio.c
>> @@ -20,6 +20,9 @@
>>  #include <net/pkt_cls.h>
>>  #include <net/sch_generic.h>
>>  
>> +static LIST_HEAD(taprio_list);
>> +static DEFINE_SPINLOCK(taprio_list_lock);
>> +
>>  #define TAPRIO_ALL_GATES_OPEN -1
>>  
>>  struct sched_entry {
>> @@ -42,9 +45,9 @@ struct taprio_sched {
>>      struct Qdisc *root;
>>      s64 base_time;
>>      int clockid;
>> -    int picos_per_byte; /* Using picoseconds because for 10Gbps+
>> -                         * speeds it's sub-nanoseconds per byte
>> -                         */
>> +    atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
>> +                                * speeds it's sub-nanoseconds per byte
>> +                                */
>>      size_t num_entries;
>>  
>>      /* Protects the update side of the RCU protected current_entry */
>> @@ -53,6 +56,7 @@ struct taprio_sched {
>>      struct list_head entries;
>>      ktime_t (*get_time)(void);
>>      struct hrtimer advance_timer;
>> +    struct list_head taprio_list;
>>  };
>>  
>>  static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>> @@ -117,7 +121,7 @@ static struct sk_buff *taprio_peek(struct Qdisc *sch)
>>  
>>  static inline int length_to_duration(struct taprio_sched *q, int len)
>>  {
>> -    return (len * q->picos_per_byte) / 1000;
>> +    return (len * atomic64_read(&q->picos_per_byte)) / 1000;
>>  }
>>  
>>  static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
>> @@ -129,6 +133,11 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
>>      u32 gate_mask;
>>      int i;
>>  
>> +    if (atomic64_read(&q->picos_per_byte) == -1) {
>> +            WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
>> +            return NULL;
>> +    }
>> +
>>      rcu_read_lock();
>>      entry = rcu_dereference(q->current_entry);
>>      /* if there's no entry, it means that the schedule didn't
>> @@ -233,7 +242,7 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
>>  
>>      next->close_time = close_time;
>>      atomic_set(&next->budget,
>> -               (next->interval * 1000) / q->picos_per_byte);
>> +               (next->interval * 1000) / atomic64_read(&q->picos_per_byte));
>>  
>>  first_run:
>>      rcu_assign_pointer(q->current_entry, next);
>> @@ -567,7 +576,8 @@ static void taprio_start_sched(struct Qdisc *sch, ktime_t start)
>>  
>>      first->close_time = ktime_add_ns(start, first->interval);
>>      atomic_set(&first->budget,
>> -               (first->interval * 1000) / q->picos_per_byte);
>> +               (first->interval * 1000) /
>> +               atomic64_read(&q->picos_per_byte));
>>      rcu_assign_pointer(q->current_entry, NULL);
>>  
>>      spin_unlock_irqrestore(&q->current_entry_lock, flags);
>> @@ -575,6 +585,45 @@ static void taprio_start_sched(struct Qdisc *sch, ktime_t start)
>>      hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
>>  }
>>  
>> +static void taprio_set_picos_per_byte(struct net_device *dev,
>> +                                  struct taprio_sched *q)
>> +{
>> +    struct ethtool_link_ksettings ecmd;
>> +    int picos_per_byte = -1;
>> +
>> +    if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
>> +        ecmd.base.speed != SPEED_UNKNOWN)
>> +            picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
>> +                                       ecmd.base.speed * 1000 * 1000);
>> +
>> +    atomic64_set(&q->picos_per_byte, picos_per_byte);
>> +    pr_info("taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
>> +            dev->name, atomic64_read(&q->picos_per_byte), ecmd.base.speed);
>
> It does not seem appropriate to use pr_info() here whenever the link
> speed changes and the taprio scheduler is reconfigured to match the
> network device's link speed. netdev_dbg() might be more appropriate, or
> no message at all to avoid spamming the kernel log. Same thing for your
> second patch.

It's been useful to me while debugging this, but I agree users won't be much
interested in it.
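
If we end up keeping a message at all, a netdev_dbg() along these lines would
probably be enough (untested sketch; netdev_dbg() already prefixes the device
name, so dev->name can go away):

-	pr_info("taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
-		dev->name, atomic64_read(&q->picos_per_byte), ecmd.base.speed);
+	netdev_dbg(dev, "taprio: set picos_per_byte to: %lld, linkspeed: %d\n",
+		   atomic64_read(&q->picos_per_byte), ecmd.base.speed);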

Thanks...

--
Dorileo
