On 10/14/20 1:53 PM, Loic Poulain wrote:
> This patch adds a new network driver implementing MHI transport for
> network packets. Packets can be in any format, though QMAP (rmnet)
> is the usual protocol (flow control + PDN mux).
> 

...

> +static void mhi_net_rx_refill_work(struct work_struct *work)
> +{
> +     struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
> +                                                   rx_refill.work);
> +     struct net_device *ndev = mhi_netdev->ndev;
> +     struct mhi_device *mdev = mhi_netdev->mdev;
> +     struct sk_buff *skb;
> +     int err;
> +
> +     do {
> +             skb = netdev_alloc_skb(ndev, READ_ONCE(ndev->mtu));
> +             if (unlikely(!skb))
> +                     break;

It is a bit strange that you use READ_ONCE(ndev->mtu) here, but then
re-read ndev->mtu below without it.

It seems you need to store the READ_ONCE() result in a temporary
variable:

                unsigned int mtu = READ_ONCE(ndev->mtu);

> +
> +             err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, ndev->mtu,
> +                                 MHI_EOT);

                 s/ndev->mtu/mtu/
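
Untested, but roughly like this, so the allocation size and the length
passed to mhi_queue_skb() cannot disagree within one iteration (rest of
the loop unchanged):

                do {
                        /* Read the MTU once per iteration: the same
                         * value is then used for the allocation and
                         * for the queued buffer length, even if
                         * ndev->mtu changes under us.
                         */
                        unsigned int mtu = READ_ONCE(ndev->mtu);

                        skb = netdev_alloc_skb(ndev, mtu);
                        if (unlikely(!skb))
                                break;

                        err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb,
                                            mtu, MHI_EOT);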


> +             if (err) {
> +                     if (unlikely(err != -ENOMEM))
> +                             net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
> +                                                 ndev->name, err);
> +                     kfree_skb(skb);
> +                     break;
> +             }
> +
> +             atomic_inc(&mhi_netdev->stats.rx_queued);

This is an unbounded loop in the kernel?

Please add a:

                cond_resched();

Before anyone gets hurt.
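
i.e. at the end of the loop body (sketch, rest unchanged):

                        atomic_inc(&mhi_netdev->stats.rx_queued);

                        /* This runs in process context from a
                         * workqueue, so give other tasks a chance to
                         * run between iterations.
                         */
                        cond_resched();
                } while (1);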


> +     } while (1);
> +
> +     /* If we're still starved of rx buffers, reschedule later */
> +     if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
> +             schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
> +}
> +
> 

