On Thu,  7 Nov 2024 16:54:54 +0800, Xuan Zhuo <xuanz...@linux.alibaba.com> wrote:
> The subsequent commit needs to know whether each indirect buffer is
> premapped or not, so introduce an extra struct for every indirect
> buffer to record this information.
>
> Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>


Hi Jason,

This patch also needs a review.

Thanks.
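
In case it helps the review, below is a stand-alone sketch (plain userspace C
with simplified stand-in types, not the kernel code itself) of the idea behind
the series: every indirect element gets a shadow entry that is filled at map
time, so teardown can work from that record instead of re-reading the
descriptor, and the follow-up change can then tell per element whether there
is anything to unmap at all.

/* shadow_sketch.c - illustrative only: simplified types, not the kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for struct vring_packed_desc: what the device sees */
struct packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/* stand-in for struct vring_desc_extra: what the driver keeps for itself */
struct desc_extra {
	uint64_t addr;	/* DMA address recorded at map time (0: nothing to unmap) */
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

int main(void)
{
	enum { N = 4 };
	struct packed_desc desc[N];
	struct desc_extra extra[N] = { 0 };
	unsigned int i;

	for (i = 0; i < N; i++) {
		uint64_t dma = 0x1000u + i * 0x100u;	/* pretend mapping result */

		desc[i].addr = dma;
		desc[i].len = 0x100;
		desc[i].id = 0;
		desc[i].flags = 0;

		/* record the mapping per element; a premapped element would
		 * simply leave its shadow entry zeroed in this sketch */
		extra[i].addr = dma;
		extra[i].len = 0x100;
		extra[i].flags = 0;
		extra[i].next = (uint16_t)(i + 1);
	}

	/* clobber the descriptor table to show teardown never reads it here */
	memset(desc, 0, sizeof(desc));

	/* teardown works purely from the shadow table */
	for (i = 0; i < N; i++) {
		if (!extra[i].addr)
			continue;	/* e.g. premapped: nothing to unmap */
		printf("unmap addr=0x%llx len=%u\n",
		       (unsigned long long)extra[i].addr,
		       (unsigned)extra[i].len);
	}

	return 0;
}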


> ---
>  drivers/virtio/virtio_ring.c | 60 +++++++++++++++++++++---------------
>  1 file changed, 36 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index 405d5a348795..cfe70c40f630 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -78,7 +78,11 @@ struct vring_desc_state_split {
>
>  struct vring_desc_state_packed {
>       void *data;                     /* Data for callback. */
> -     struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
> +
> +     /* Indirect desc table and extra table, if any. They are allocated
> +      * together, so we do not put extra pressure on the memory allocator.
> +      */
> +     struct vring_packed_desc *indir_desc;
>       u16 num;                        /* Descriptor list length. */
>       u16 last;                       /* The last desc state in a list. */
>  };
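
To make the comment above concrete: both tables live in a single allocation,
with the extra table starting right behind the last descriptor, so indir_desc
can stay a single pointer and vring_desc_state_packed does not grow. A minimal
layout sketch with simplified stand-in types (assuming a 64-bit dma_addr_t;
illustrative only):

/* layout_sketch.c - illustrative only; not the kernel structures. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct packed_desc { uint64_t addr; uint32_t len; uint16_t id, flags; };
struct desc_extra  { uint64_t addr; uint32_t len; uint16_t flags, next; };

int main(void)
{
	unsigned int n = 8;
	struct packed_desc *desc =
		malloc((sizeof(*desc) + sizeof(struct desc_extra)) * n);
	struct desc_extra *extra;

	if (!desc)
		return 1;

	/* the extra table begins right behind desc[n - 1] */
	extra = (struct desc_extra *)&desc[n];
	assert((char *)extra == (char *)desc + n * sizeof(*desc));

	printf("%u elements: one %zu-byte allocation instead of two\n",
	       n, (sizeof(*desc) + sizeof(struct desc_extra)) * n);
	free(desc);
	return 0;
}
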
> @@ -1238,27 +1242,12 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
>       }
>  }
>
> -static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
> -                                 const struct vring_packed_desc *desc)
> -{
> -     u16 flags;
> -
> -     if (!vring_need_unmap_buffer(vq))
> -             return;
> -
> -     flags = le16_to_cpu(desc->flags);
> -
> -     dma_unmap_page(vring_dma_dev(vq),
> -                    le64_to_cpu(desc->addr),
> -                    le32_to_cpu(desc->len),
> -                    (flags & VRING_DESC_F_WRITE) ?
> -                    DMA_FROM_DEVICE : DMA_TO_DEVICE);
> -}
> -
>  static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
>                                                      gfp_t gfp)
>  {
> +     struct vring_desc_extra *extra;
>       struct vring_packed_desc *desc;
> +     int i, size;
>
>       /*
>        * We require lowmem mappings for the descriptors because
> @@ -1267,7 +1256,16 @@ static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
>        */
>       gfp &= ~__GFP_HIGHMEM;
>
> -     desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
> +     size = (sizeof(*desc) + sizeof(*extra)) * total_sg;
> +
> +     desc = kmalloc(size, gfp);
> +     if (!desc)
> +             return NULL;
> +
> +     extra = (struct vring_desc_extra *)&desc[total_sg];
> +
> +     for (i = 0; i < total_sg; i++)
> +             extra[i].next = i + 1;
>
>       return desc;
>  }
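
One note on this helper, just to spell the assumption out: the cast at
&desc[total_sg] relies on struct vring_desc_extra not needing stricter
alignment than struct vring_packed_desc, which holds for the current
definitions. If we want that documented, something like the following could be
added (a sketch, not part of this patch):

	/* make the layout assumption behind &desc[total_sg] explicit */
	BUILD_BUG_ON(__alignof__(struct vring_desc_extra) >
		     __alignof__(struct vring_packed_desc));
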
> @@ -1280,6 +1278,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>                                        void *data,
>                                        gfp_t gfp)
>  {
> +     struct vring_desc_extra *extra;
>       struct vring_packed_desc *desc;
>       struct scatterlist *sg;
>       unsigned int i, n, err_idx;
> @@ -1291,6 +1290,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>       if (!desc)
>               return -ENOMEM;
>
> +     extra = (struct vring_desc_extra *)&desc[total_sg];
> +
>       if (unlikely(vq->vq.num_free < 1)) {
>               pr_debug("Can't add buf len 1 - avail = 0\n");
>               kfree(desc);
> @@ -1312,6 +1313,13 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>                                               0 : VRING_DESC_F_WRITE);
>                       desc[i].addr = cpu_to_le64(addr);
>                       desc[i].len = cpu_to_le32(sg->length);
> +
> +                     if (unlikely(vq->use_dma_api)) {
> +                             extra[i].addr = addr;
> +                             extra[i].len = sg->length;
> +                             extra[i].flags = n < out_sgs ?  0 : VRING_DESC_F_WRITE;
> +                     }
> +
>                       i++;
>               }
>       }
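
To connect this with the unmap side: the shadow entry is only filled when the
DMA API is in use, which is exactly the case where there is a mapping to undo,
and the recorded flags are what later selects the DMA direction, much like the
removed vring_unmap_desc_packed() did by reading the descriptor. Roughly
(illustrative fragment, not new code in this patch):

	dma_unmap_page(vring_dma_dev(vq),
		       extra[i].addr, extra[i].len,
		       (extra[i].flags & VRING_DESC_F_WRITE) ?
		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
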
> @@ -1381,7 +1389,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
>       err_idx = i;
>
>       for (i = 0; i < err_idx; i++)
> -             vring_unmap_desc_packed(vq, &desc[i]);
> +             vring_unmap_extra_packed(vq, &extra[i]);
>
>  free_desc:
>       kfree(desc);
> @@ -1617,7 +1625,8 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
>       }
>
>       if (vq->indirect) {
> -             u32 len;
> +             struct vring_desc_extra *extra;
> +             u32 len, num;
>
>               /* Free the indirect table, if any, now that it's unmapped. */
>               desc = state->indir_desc;
> @@ -1626,9 +1635,12 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
>
>               if (vring_need_unmap_buffer(vq)) {
>                       len = vq->packed.desc_extra[id].len;
> -                     for (i = 0; i < len / sizeof(struct vring_packed_desc);
> -                                     i++)
> -                             vring_unmap_desc_packed(vq, &desc[i]);
> +                     num = len / sizeof(struct vring_packed_desc);
> +
> +                     extra = (struct vring_desc_extra *)&desc[num];
> +
> +                     for (i = 0; i < num; i++)
> +                             vring_unmap_extra_packed(vq, &extra[i]);
>               }
>               kfree(desc);
>               state->indir_desc = NULL;
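
And to spell out the arithmetic in this hunk: the ring records the indirect
table's byte size in desc_extra[id].len at add time, so the division here
recovers exactly the element count that alloc_indirect_packed() sized both
tables with, and &desc[num] is the start of the co-allocated extra table.
Roughly (illustrative fragment):

	/* add path records the byte size of the indirect table ... */
	vq->packed.desc_extra[id].len = total_sg * sizeof(struct vring_packed_desc);

	/* ... so the detach path can recover the element count and with it
	 * the start of the co-allocated extra table */
	num   = vq->packed.desc_extra[id].len / sizeof(struct vring_packed_desc);
	extra = (struct vring_desc_extra *)&desc[num];	/* == &desc[total_sg] */
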
> --
> 2.32.0.3.g01195cf9f
>
