On Mon, Mar 24, 2025 at 7:01 AM Jason Wang <jasow...@redhat.com> wrote:
>
> Factor out the core logic for updating last_used_idx so it can be
> reused by the packed in-order implementation.
>

Acked-by: Eugenio Pérez <epere...@redhat.com>

> Signed-off-by: Jason Wang <jasow...@redhat.com>
> ---
>  drivers/virtio/virtio_ring.c | 43 +++++++++++++++++++++---------------
>  1 file changed, 25 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index fe3e6f3d0f96..bd4faf04862c 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -1749,6 +1749,30 @@ static bool more_used_packed(const struct vring_virtqueue *vq)
>         return virtqueue_poll_packed(vq, READ_ONCE(vq->last_used_idx));
>  }
>
> +static void update_last_used_idx_packed(struct vring_virtqueue *vq,
> +                                       u16 id, u16 last_used,
> +                                       u16 used_wrap_counter)
> +{
> +       last_used += vq->packed.desc_state[id].num;
> +       if (unlikely(last_used >= vq->packed.vring.num)) {
> +               last_used -= vq->packed.vring.num;
> +               used_wrap_counter ^= 1;
> +       }
> +
> +       last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
> +       WRITE_ONCE(vq->last_used_idx, last_used);
> +
> +       /*
> +        * If we expect an interrupt for the next entry, tell host
> +        * by writing event index and flush out the write before
> +        * the read in the next get_buf call.
> +        */
> +       if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
> +               virtio_store_mb(vq->weak_barriers,
> +                               &vq->packed.vring.driver->off_wrap,
> +                               cpu_to_le16(vq->last_used_idx));
> +}
> +
>  static void *virtqueue_get_buf_ctx_packed(struct vring_virtqueue *vq,
>                                           unsigned int *len,
>                                           void **ctx)
> @@ -1792,24 +1816,7 @@ static void *virtqueue_get_buf_ctx_packed(struct vring_virtqueue *vq,
>         ret = vq->packed.desc_state[id].data;
>         detach_buf_packed(vq, id, ctx);
>
> -       last_used += vq->packed.desc_state[id].num;
> -       if (unlikely(last_used >= vq->packed.vring.num)) {
> -               last_used -= vq->packed.vring.num;
> -               used_wrap_counter ^= 1;
> -       }
> -
> -       last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
> -       WRITE_ONCE(vq->last_used_idx, last_used);
> -
> -       /*
> -        * If we expect an interrupt for the next entry, tell host
> -        * by writing event index and flush out the write before
> -        * the read in the next get_buf call.
> -        */
> -       if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
> -               virtio_store_mb(vq->weak_barriers,
> -                               &vq->packed.vring.driver->off_wrap,
> -                               cpu_to_le16(vq->last_used_idx));
> +       update_last_used_idx_packed(vq, id, last_used, used_wrap_counter);
>
>         LAST_ADD_TIME_INVALID(vq);
>
> --
> 2.42.0
>
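As a side note for anyone reading along: the helper boils down to plain
index arithmetic. It advances last_used by the completed chain's
descriptor count, wraps it modulo the ring size, flips the used wrap
counter on wrap, and stores the counter in bit VRING_PACKED_EVENT_F_WRAP_CTR
of the saved index. Below is a small standalone userspace sketch of that
arithmetic, not kernel code; the pack_last_used() name and the ring/index
values are made up for illustration.

    /*
     * Standalone illustration of the index update that
     * update_last_used_idx_packed() encapsulates: advance, wrap,
     * flip the wrap counter, and pack it into the stored index.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define VRING_PACKED_EVENT_F_WRAP_CTR 15 /* wrap counter bit position */

    static uint16_t pack_last_used(uint16_t last_used, uint16_t num,
                                   uint16_t ring_num, uint16_t wrap_counter)
    {
            last_used += num;                /* consumed descriptors */
            if (last_used >= ring_num) {
                    last_used -= ring_num;   /* wrap around the ring */
                    wrap_counter ^= 1;       /* flip on every wrap */
            }
            return last_used | (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);
    }

    int main(void)
    {
            /* ring of 256 entries, index 250, wrap counter 1, chain of 10 */
            uint16_t packed = pack_last_used(250, 10, 256, 1);

            printf("index=%u wrap=%u\n",
                   packed & ~(1u << VRING_PACKED_EVENT_F_WRAP_CTR),
                   packed >> VRING_PACKED_EVENT_F_WRAP_CTR);
            return 0;
    }

Nothing changes functionally here; the patch only moves this code so the
upcoming packed in-order path can call the same helper.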

