> From: Sean Christopherson <sea...@google.com>
> Sent: Saturday, April 5, 2025 5:15 AM
> 
> @@ -505,15 +505,12 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
>       if (ret)
>               goto out_put_eventfd_ctx;
> 
> -     ctx->producer.token = trigger;
>       ctx->producer.irq = irq;
> -     ret = irq_bypass_register_producer(&ctx->producer);
> +     ret = irq_bypass_register_producer(&ctx->producer, trigger);
>       if (unlikely(ret)) {
>               dev_info(&pdev->dev,
>               "irq bypass producer (token %p) registration fails: %d\n",
>               ctx->producer.token, ret);

Use 'trigger' in the dev_info() instead, as ctx->producer.token is still
NULL if registration fails.
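
e.g. something like this (untested, just to illustrate the suggestion):

	dev_info(&pdev->dev,
		 "irq bypass producer (token %p) registration fails: %d\n",
		 trigger, ret);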

> @@ -18,20 +19,20 @@ struct irq_bypass_consumer;
>   * The IRQ bypass manager is a simple set of lists and callbacks that allows
>   * IRQ producers (ex. physical interrupt sources) to be matched to IRQ
>   * consumers (ex. virtualization hardware that allows IRQ bypass or offload)
> - * via a shared token (ex. eventfd_ctx).  Producers and consumers register
> - * independently.  When a token match is found, the optional @stop callback
> - * will be called for each participant.  The pair will then be connected via
> - * the @add_* callbacks, and finally the optional @start callback will allow
> - * any final coordination.  When either participant is unregistered, the
> - * process is repeated using the @del_* callbacks in place of the @add_*
> - * callbacks.  Match tokens must be unique per producer/consumer, 1:N pairings
> - * are not supported.
> + * via a shared eventfd_ctx).  Producers and consumers register independently.

s/eventfd_ctx)/eventfd_ctx/

> +int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer,
> +                              struct eventfd_ctx *eventfd)
>  {
>       struct irq_bypass_consumer *tmp;
>       struct irq_bypass_producer *producer;
>       int ret;
> 
> -     if (!consumer->token ||
> -         !consumer->add_producer || !consumer->del_producer)
> +     if (WARN_ON_ONCE(consumer->token))
> +             return -EINVAL;
> +
> +     if (!consumer->add_producer || !consumer->del_producer)
>               return -EINVAL;
> 
>       mutex_lock(&lock);
> 
>       list_for_each_entry(tmp, &consumers, node) {
> -             if (tmp->token == consumer->token || tmp == consumer) {
> +             if (tmp->token == eventfd || tmp == consumer) {
>                       ret = -EBUSY;
>                       goto out_err;
>               }
>       }

the 2nd check 'tmp == consumer' is redundant. If they were equal then
consumer->token would already be non-NULL, so the earlier WARN_ON would
have triggered and returned before reaching this loop.
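
i.e. (untested sketch, assuming consumer->token is only set after a
successful registration):

	list_for_each_entry(tmp, &consumers, node) {
		if (tmp->token == eventfd) {
			ret = -EBUSY;
			goto out_err;
		}
	}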

otherwise,

Reviewed-by: Kevin Tian <kevin.t...@intel.com>
