At 2026-04-03 10:55:58, "Jason Wang" <[email protected]> wrote:
>On Tue, Mar 31, 2026 at 6:27 PM Longjun Tang <[email protected]> wrote:
>>
>> From: Longjun Tang <[email protected]>
>>
>> In vring_interrupt(), IRQ_NONE is returned when the used ring is empty.
>> However, sometimes, such as with busy-polling, buffers may already have
>> been consumed from the used ring before a stale interrupt notification
>> arrives, so the handler returns IRQ_NONE for a genuine interrupt.
>>
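>> A minimal timeline of the race (an illustrative interleaving, not a
>> captured trace):
>>
>>   device: places a buffer in the used ring and sends a notification
>>   driver: disables callbacks, busy-polls and consumes that buffer
>>   driver: the in-flight notification now invokes vring_interrupt()
>>   driver: more_used() is false -> IRQ_NONE, counted as spurious
>>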
>> The kernel's spurious-IRQ detector counts unhandled interrupts and will
>> permanently disable the interrupt line once 99,900 out of the last
>> 100,000 interrupts return IRQ_NONE.
>>
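>> For reference, the threshold logic in kernel/irq/spurious.c's
>> note_interrupt() is roughly as follows (an abridged sketch, not the
>> verbatim source):
>>
>>         desc->irq_count++;
>>         if (likely(desc->irq_count < 100000))
>>                 return;
>>
>>         desc->irq_count = 0;
>>         if (unlikely(desc->irqs_unhandled > 99900)) {
>>                 /* The interrupt is stuck: report it and disable the line. */
>>                 __report_bad_irq(desc, action_ret);
>>                 desc->istate |= IRQS_SPURIOUS_DISABLED;
>>                 desc->depth++;
>>                 irq_disable(desc);
>>         }
>>         desc->irqs_unhandled = 0;
>>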
>> Add is_cb_disabled() to virtqueue_ops and, when more_used() is false but
>> callbacks are suppressed, return IRQ_HANDLED instead of IRQ_NONE so the
>> spurious-IRQ counter does not accumulate.
>>
>> Signed-off-by: Longjun Tang <[email protected]>
>> ---
>>  drivers/virtio/virtio_ring.c | 29 +++++++++++++++++++++++++++++
>>  1 file changed, 29 insertions(+)
>>
>> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
>> index 335692d41617..52df932fc4a2 100644
>> --- a/drivers/virtio/virtio_ring.c
>> +++ b/drivers/virtio/virtio_ring.c
>> @@ -185,6 +185,7 @@ struct virtqueue_ops {
>>                      unsigned int last_used_idx);
>>         void *(*detach_unused_buf)(struct vring_virtqueue *vq);
>>         bool (*more_used)(const struct vring_virtqueue *vq);
>> +       bool (*is_cb_disabled)(const struct vring_virtqueue *vq);
>>         int (*resize)(struct vring_virtqueue *vq, u32 num);
>>         void (*reset)(struct vring_virtqueue *vq);
>>  };
>> @@ -1063,6 +1064,12 @@ static void virtqueue_disable_cb_split(struct vring_virtqueue *vq)
>>         }
>>  }
>>
>> +static bool is_cb_disabled_split(const struct vring_virtqueue *vq)
>> +{
>> +       return !!(data_race(vq->split.avail_flags_shadow) &
>> +                 VRING_AVAIL_F_NO_INTERRUPT);
>> +}
>> +
>>  static unsigned int virtqueue_enable_cb_prepare_split(struct vring_virtqueue *vq)
>>  {
>>         u16 last_used_idx;
>> @@ -2227,6 +2234,12 @@ static void virtqueue_disable_cb_packed(struct vring_virtqueue *vq)
>>         }
>>  }
>>
>> +static bool is_cb_disabled_packed(const struct vring_virtqueue *vq)
>> +{
>> +       return data_race(vq->packed.event_flags_shadow) ==
>> +              VRING_PACKED_EVENT_FLAG_DISABLE;
>> +}
>> +
>>  static unsigned int virtqueue_enable_cb_prepare_packed(struct vring_virtqueue *vq)
>>  {
>>         START_USE(vq);
>> @@ -2644,6 +2657,7 @@ static const struct virtqueue_ops split_ops = {
>>         .poll = virtqueue_poll_split,
>>         .detach_unused_buf = virtqueue_detach_unused_buf_split,
>>         .more_used = more_used_split,
>> +       .is_cb_disabled = is_cb_disabled_split,
>>         .resize = virtqueue_resize_split,
>>         .reset = virtqueue_reset_split,
>>  };
>> @@ -2658,6 +2672,7 @@ static const struct virtqueue_ops packed_ops = {
>>         .poll = virtqueue_poll_packed,
>>         .detach_unused_buf = virtqueue_detach_unused_buf_packed,
>>         .more_used = more_used_packed,
>> +       .is_cb_disabled = is_cb_disabled_packed,
>>         .resize = virtqueue_resize_packed,
>>         .reset = virtqueue_reset_packed,
>>  };
>> @@ -2672,6 +2687,7 @@ static const struct virtqueue_ops split_in_order_ops = {
>>         .poll = virtqueue_poll_split,
>>         .detach_unused_buf = virtqueue_detach_unused_buf_split,
>>         .more_used = more_used_split_in_order,
>> +       .is_cb_disabled = is_cb_disabled_split,
>>         .resize = virtqueue_resize_split,
>>         .reset = virtqueue_reset_split,
>>  };
>> @@ -2686,6 +2702,7 @@ static const struct virtqueue_ops packed_in_order_ops = {
>>         .poll = virtqueue_poll_packed,
>>         .detach_unused_buf = virtqueue_detach_unused_buf_packed,
>>         .more_used = more_used_packed_in_order,
>> +       .is_cb_disabled = is_cb_disabled_packed,
>>         .resize = virtqueue_resize_packed,
>>         .reset = virtqueue_reset_packed,
>>  };
>> @@ -3231,6 +3248,18 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
>>         struct vring_virtqueue *vq = to_vvq(_vq);
>>
>>         if (!more_used(vq)) {
>> +               /*
>> +                * Stale interrupt: the device posted this notification
>> +                * before it observed the callback suppression. Even
>> +                * though more_used() found no work, return IRQ_HANDLED
>> +                * so the line is not flagged as spurious.
>> +                */
>> +               if (VIRTQUEUE_CALL(vq, is_cb_disabled)) {
>> +                       if (vq->event)
>> +                               data_race(vq->event_triggered = true);
>
>Why event idx is special here?
>
>Btw, looking at the comment of virtqueue_disable_cb_split:
>
>        /*
>         * If device triggered an event already it won't trigger one again:
>         * no need to disable.
>         */
>        if (vq->event_triggered)
>                return;
>
>It makes sense only for event index.

Yes, I will remove this part in the next version.

>
>Thanks
>
>> +                       pr_debug("virtqueue stale interrupt (callbacks disabled) for %p\n", vq);
>> +                       return IRQ_HANDLED;
>> +               }
>>                 pr_debug("virtqueue interrupt with no work for %p\n", vq);
>>                 return IRQ_NONE;
>>         }
>> --
>> 2.43.0
>>
