On Tue, 11 Feb 2025 14:06:53 -0700 Ahmed Zaki wrote:
> +static void netif_napi_affinity_release(struct kref *ref)
> +{
> +     struct napi_struct *napi =
> +             container_of(ref, struct napi_struct, notify.kref);
> +     struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap;
> +

We should only get here from our own cleanup path; otherwise locking
and state synchronization may be a concern. Something like:

        netdev_assert_locked(napi->dev);
        WARN_ON(test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER,
                                   &napi->state));

> +     rmap->obj[napi->napi_rmap_idx] = NULL;
> +     napi->napi_rmap_idx = -1;
> +     cpu_rmap_put(rmap);
> +}
> +
> +static int napi_irq_cpu_rmap_add(struct napi_struct *napi, int irq)
> +{
> +     struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap;
> +     int rc;
> +
> +     napi->notify.notify = netif_irq_cpu_rmap_notify;
> +     napi->notify.release = netif_napi_affinity_release;
> +     cpu_rmap_get(rmap);
> +     rc = cpu_rmap_add(rmap, napi);
> +     if (rc < 0)
> +             goto err_add;
> +
> +     napi->napi_rmap_idx = rc;
> +     rc = irq_set_affinity_notifier(irq, &napi->notify);
> +     if (rc)
> +             goto err_set;
> +
> +     set_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state);
> +     return 0;

Some of this function is common with the code in
netif_napi_set_irq_locked() under:

        } else if (napi->dev->irq_affinity_auto) {

Could you refactor this to avoid the duplication, and to make it
clearer which parts differ?

> +err_set:
> +     rmap->obj[napi->napi_rmap_idx] = NULL;
> +     napi->napi_rmap_idx = -1;
> +err_add:
> +     cpu_rmap_put(rmap);
> +     return rc;
> +}
> +
> +int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs)
> +{
> +     if (dev->rx_cpu_rmap_auto)
> +             return 0;
> +
> +     dev->rx_cpu_rmap = alloc_irq_cpu_rmap(num_irqs);
> +     if (!dev->rx_cpu_rmap)
> +             return -ENOMEM;
> +
> +     dev->rx_cpu_rmap_auto = true;
> +     return 0;
> +}
> +EXPORT_SYMBOL(netif_enable_cpu_rmap);
> +
> +static void netif_del_cpu_rmap(struct net_device *dev)
> +{
> +     struct cpu_rmap *rmap = dev->rx_cpu_rmap;
> +
> +     if (!dev->rx_cpu_rmap_auto)
> +             return;
> +
> +     /* Free the rmap */
> +     cpu_rmap_put(rmap);
> +     dev->rx_cpu_rmap = NULL;
> +     dev->rx_cpu_rmap_auto = false;
> +}
> +
> +#else
> +static int napi_irq_cpu_rmap_add(struct napi_struct *napi, int irq)
> +{
> +     return 0;
> +}
> +
> +int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs)
> +{
> +     return 0;
> +}
> +EXPORT_SYMBOL(netif_enable_cpu_rmap);
> +
> +static void netif_del_cpu_rmap(struct net_device *dev)
> +{
> +}
> +#endif
> +
> +void netif_napi_set_irq_locked(struct napi_struct *napi, int irq)
> +{
> +     int rc;

maybe netdev_assert_locked_or_invisible(napi->dev); ?

> +     if (napi->irq == irq)
> +             return;
> +
> +     /* Remove existing rmap entries */
> +     if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state))
> +             irq_set_affinity_notifier(napi->irq, NULL);
> +
> +     napi->irq = irq;
> +     if (irq < 0)
> +             return;
> +
> +     rc = napi_irq_cpu_rmap_add(napi, irq);
> +     if (rc)
> +             netdev_warn(napi->dev, "Unable to update aRFS map (%d)\n", rc);
> +}
> +EXPORT_SYMBOL(netif_napi_set_irq_locked);
> +
>  static void napi_restore_config(struct napi_struct *n)
>  {
>       n->defer_hard_irqs = n->config->defer_hard_irqs;

Reply via email to