Re: [PATCH 08/26] virtio-mem: use array_size

2023-06-24 Thread Xuan Zhuo
On Fri, 23 Jun 2023 23:14:39 +0200, Julia Lawall  wrote:
> Use array_size to protect against multiplication overflows.
>
> The changes were done using the following Coccinelle semantic patch:
>
> // <smpl>
> @@
> expression E1, E2;
> constant C1, C2;
> identifier alloc = {vmalloc,vzalloc};
> @@
>
> (
>   alloc(C1 * C2,...)
> |
>   alloc(
> -   (E1) * (E2)
> +   array_size(E1, E2)
>   ,...)
> )
> // </smpl>
>
> Signed-off-by: Julia Lawall 

Reviewed-by: Xuan Zhuo 

>
> ---
>  drivers/virtio/virtio_mem.c |6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
> index 835f6cc2fb66..a4dfe7aab288 100644
> --- a/drivers/virtio/virtio_mem.c
> +++ b/drivers/virtio/virtio_mem.c
> @@ -399,7 +399,7 @@ static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm)
>   if (vm->bbm.bb_states && old_pages == new_pages)
>   return 0;
>
> - new_array = vzalloc(new_pages * PAGE_SIZE);
> + new_array = vzalloc(array_size(new_pages, PAGE_SIZE));
>   if (!new_array)
>   return -ENOMEM;
>
> @@ -465,7 +465,7 @@ static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
>   if (vm->sbm.mb_states && old_pages == new_pages)
>   return 0;
>
> - new_array = vzalloc(new_pages * PAGE_SIZE);
> + new_array = vzalloc(array_size(new_pages, PAGE_SIZE));
>   if (!new_array)
>   return -ENOMEM;
>
> @@ -588,7 +588,7 @@ static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
>   if (vm->sbm.sb_states && old_pages == new_pages)
>   return 0;
>
> - new_bitmap = vzalloc(new_pages * PAGE_SIZE);
> + new_bitmap = vzalloc(array_size(new_pages, PAGE_SIZE));
>   if (!new_bitmap)
>   return -ENOMEM;
>
>
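
For reference, array_size() (from <linux/overflow.h>) saturates to
SIZE_MAX when the multiplication would overflow, so the vzalloc() calls
above fail cleanly instead of allocating a short buffer. A minimal
sketch of the effect:

	size_t bytes = array_size(new_pages, PAGE_SIZE); /* SIZE_MAX on overflow */

	new_array = vzalloc(bytes); /* vzalloc(SIZE_MAX) returns NULL */
	if (!new_array)
		return -ENOMEM;

A semantic patch like the one above can be applied tree-wide with
something like "spatch --sp-file array_size.cocci --in-place --dir
drivers/virtio" (the .cocci file name here is hypothetical).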


Re: [PATCH vhost v10 05/10] virtio_ring: split-detach: support return dma info to driver

2023-06-24 Thread Xuan Zhuo
On Thu, 22 Jun 2023 15:36:41 -0400, "Michael S. Tsirkin"  wrote:
> On Fri, Jun 02, 2023 at 05:22:01PM +0800, Xuan Zhuo wrote:
> > Under the premapped mode, the driver needs to unmap the DMA address
> > after receiving the buffer. The virtio core records the DMA address,
> > so the driver needs a way to get the dma info from the virtio core.
> >
> > A straightforward approach is to pass an array to the virtio core when
> > calling virtqueue_get_buf(). However, it is not feasible when there are
> > multiple DMA addresses in the descriptor chain, and the array size is
> > unknown.
> >
> > To solve this problem, a helper is introduced. After calling
> > virtqueue_get_buf(), the driver can call the helper to retrieve the
> > DMA info of one descriptor. If the helper returns -EAGAIN, there are
> > more DMA addresses to be processed, and the driver should call the
> > helper again. To keep track of the current position in the chain, a
> > cursor must be passed to the helper, which is initialized by
> > virtqueue_get_buf().
> >
> > Some processing is done inside this helper, so it MUST be called
> > under the premapped mode.
> >
> > Signed-off-by: Xuan Zhuo 
> > ---
> >  drivers/virtio/virtio_ring.c | 118 ---
> >  include/linux/virtio.h   |  11 
> >  2 files changed, 119 insertions(+), 10 deletions(-)
> >
> > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> > index dc109fbc05a5..cdc4349f6066 100644
> > --- a/drivers/virtio/virtio_ring.c
> > +++ b/drivers/virtio/virtio_ring.c
> > @@ -754,8 +754,95 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
> > return needs_kick;
> >  }
> >
> > -static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
> > -void **ctx)
> > +static void detach_cursor_init_split(struct vring_virtqueue *vq,
> > +				     struct virtqueue_detach_cursor *cursor, u16 head)
> > +{
> > +   struct vring_desc_extra *extra;
> > +
> > +   extra = &vq->split.desc_extra[head];
> > +
> > +   /* Clear data ptr. */
> > +   vq->split.desc_state[head].data = NULL;
> > +
> > +   cursor->head = head;
> > +   cursor->done = 0;
> > +
> > +   if (extra->flags & VRING_DESC_F_INDIRECT) {
> > +   cursor->num = extra->len / sizeof(struct vring_desc);
> > +   cursor->indirect = true;
> > +   cursor->pos = 0;
> > +
> > +   vring_unmap_one_split(vq, head);
> > +
> > +   extra->next = vq->free_head;
> > +
> > +   vq->free_head = head;
> > +
> > +   /* Plus final descriptor */
> > +   vq->vq.num_free++;
> > +
> > +   } else {
> > +   cursor->indirect = false;
> > +   cursor->pos = head;
> > +   }
> > +}
> > +
> > +static int virtqueue_detach_split(struct virtqueue *_vq, struct virtqueue_detach_cursor *cursor,
> > +				  dma_addr_t *addr, u32 *len, enum dma_data_direction *dir)
> > +{
>
> I don't get it. This is generic split vq code?

No. This is the API for the split vq when the address is mapped by the
driver (premapped mode).

> Why is it unconditionally
> wasting time with cursors etc? Poking at split.desc_extra when not
> necessary is also not really nice, will cause lots of cache misses.

virtqueue_get_buf_ctx_split() is the generic code.

I just added a check of vq->premapped there.

>
> And it looks like we duplicated a bunch of logic?

Yes.

detach_buf_split() is the original logic. But now the driver needs the
DMA info of every descriptor, so I broke up the loop of
detach_buf_split(). The logic is simple, so I think it is OK.

virtqueue_detach_split() returns the DMA info of one descriptor at a
time. detach_cursor_init_split() initializes the cursor inside
virtqueue_get_buf_ctx_split().
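
For clarity, the intended driver-side usage is a loop like the sketch
below (based on this patchset's API; patch 10 implements the same loop
as virtnet_generic_unmap()):

	struct virtqueue_detach_cursor cursor;
	enum dma_data_direction dir;
	dma_addr_t addr;
	u32 len;
	int err;

	/* The cursor is initialized by virtqueue_get_buf_premapped(). */
	do {
		err = virtqueue_detach(vq, &cursor, &addr, &len, &dir);
		if (!err || err == -EAGAIN)
			dma_unmap_page_attrs(virtqueue_dma_dev(vq), addr,
					     len, dir, 0);
	} while (err == -EAGAIN);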

>
>
> > +   struct vring_virtqueue *vq = to_vvq(_vq);
> > +   __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
> > +   int rc = -EAGAIN;
> > +
> > +   if (unlikely(cursor->done))
> > +   return -EINVAL;
> > +
> > +   if (!cursor->indirect) {
> > +   struct vring_desc_extra *extra;
> > +   unsigned int i;
> > +
> > +   i = cursor->pos;
> > +
> > +   extra = &vq->split.desc_extra[i];
> > +
> > +   if (vq->split.vring.desc[i].flags & nextflag) {
> > +   cursor->pos = extra->next;
> > +   } else {
> > +   extra->next = vq->free_head;
> > +   vq->free_head = cursor->head;
> > +   cursor->done = true;
> > +   rc = 0;
> > +   }
> > +
> > +   *addr = extra->addr;
> > +   *len = extra->len;
> > +		*dir = (extra->flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
> > +
> > +   vq->vq.num_free++;
> > +
> > +   } else {
> > +   struct vring_desc *indir_desc, *desc;
> > +   u16 flags;
> > +
> > +   indir_desc = vq->split.desc_state[cursor->head

Re: [PATCH vhost v10 10/10] virtio_net: support dma premapped

2023-06-24 Thread Xuan Zhuo
On Thu, 22 Jun 2023 08:15:03 -0400, "Michael S. Tsirkin"  wrote:
> On Fri, Jun 02, 2023 at 05:22:06PM +0800, Xuan Zhuo wrote:
> > Introduce the module param "experiment_premapped" to enable the mode
> > in which virtio-net does the DMA mapping itself.
> >
> > If it is true, the vqs of virtio-net are under the premapped mode:
> > the virtio core just handles sgs that carry a dma_address, and the
> > driver must get the DMA address of the buffer from the virtio core in
> > order to unmap it after getting the buffer back.
> >
> > That will be useful when AF_XDP is enabled: AF_XDP tx and the kernel
> > packet xmit will share the tx queue, so the skb xmit must support the
> > premapped mode.
> >
> > Signed-off-by: Xuan Zhuo 
>
>
> I put this in next but I don't think this is going upstream
> in its current form,

I agree.

> certainly not with the experiment_premapped module param
> that no one will know how to enable. If you want to experiment,
> keep it in your private tree; experimenting on humans requires
> an ethics board approval and consent forms :)

^_^

Maybe this patchset should not include this patch; it should go
together with the patches that actually use the premapped mode.

>
> Spreading the "premapped" boolean all of the place is also
> far from pretty, I wonder why we can't only specify it when adding.

I guess you mean that we should just pass "premapped" to the virtio
API when adding the buffer.

I will try.

Thanks.
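
For reference, under the premapped mode the driver maps the buffer
itself before adding it to the vq; roughly like this (a sketch, using
the virtqueue_dma_dev() helper from this series):

	struct scatterlist sg;
	dma_addr_t addr;

	/* Map with the virtqueue's DMA device, which may differ from
	 * the virtio device itself.
	 */
	addr = dma_map_single(virtqueue_dma_dev(vq), buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(virtqueue_dma_dev(vq), addr))
		return -ENOMEM;

	/* Hand the virtio core an sg that already carries the DMA address. */
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = addr;
	sg_dma_len(&sg) = size;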


>
> > ---
> >  drivers/net/virtio_net.c | 163 +--
> >  1 file changed, 141 insertions(+), 22 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 2396c28c0122..5898212fcb3c 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -26,10 +26,11 @@
> >  static int napi_weight = NAPI_POLL_WEIGHT;
> >  module_param(napi_weight, int, 0444);
> >
> > -static bool csum = true, gso = true, napi_tx = true;
> > +static bool csum = true, gso = true, napi_tx = true, experiment_premapped;
> >  module_param(csum, bool, 0444);
> >  module_param(gso, bool, 0444);
> >  module_param(napi_tx, bool, 0644);
> > +module_param(experiment_premapped, bool, 0644);
> >
> >  /* FIXME: MTU in config. */
> >  #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
> > @@ -142,6 +143,9 @@ struct send_queue {
> >
> > /* Record whether sq is in reset state. */
> > bool reset;
> > +
> > +   /* The vq is in premapped mode. */
> > +   bool premapped;
> >  };
> >
> >  /* Internal representation of a receive virtqueue */
> > @@ -174,6 +178,9 @@ struct receive_queue {
> > char name[16];
> >
> > struct xdp_rxq_info xdp_rxq;
> > +
> > +   /* The vq is in premapped mode. */
> > +   bool premapped;
> >  };
> >
> >  /* This structure can contain rss message with maximum settings for indirection table and keysize
> > @@ -546,6 +553,105 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> > return skb;
> >  }
> >
> > +static int virtnet_generic_unmap(struct virtqueue *vq, struct virtqueue_detach_cursor *cursor)
> > +{
> > +   enum dma_data_direction dir;
> > +   dma_addr_t addr;
> > +   u32 len;
> > +   int err;
> > +
> > +   do {
> > +   err = virtqueue_detach(vq, cursor, &addr, &len, &dir);
> > +   if (!err || err == -EAGAIN)
> > +			dma_unmap_page_attrs(virtqueue_dma_dev(vq), addr, len, dir, 0);
> > +
> > +   } while (err == -EAGAIN);
> > +
> > +   return err;
> > +}
> > +
> > +static void *virtnet_detach_unused_buf(struct virtqueue *vq, bool premapped)
> > +{
> > +   struct virtqueue_detach_cursor cursor;
> > +   void *buf;
> > +
> > +   if (!premapped)
> > +   return virtqueue_detach_unused_buf(vq);
> > +
> > +   buf = virtqueue_detach_unused_buf_premapped(vq, &cursor);
> > +   if (buf)
> > +   virtnet_generic_unmap(vq, &cursor);
> > +
> > +   return buf;
> > +}
> > +
> > +static void *virtnet_get_buf_ctx(struct virtqueue *vq, bool premapped, u32 *len, void **ctx)
> > +{
> > +   struct virtqueue_detach_cursor cursor;
> > +   void *buf;
> > +
> > +   if (!premapped)
> > +   return virtqueue_get_buf_ctx(vq, len, ctx);
> > +
> > +   buf = virtqueue_get_buf_premapped(vq, len, ctx, &cursor);
> > +   if (buf)
> > +   virtnet_generic_unmap(vq, &cursor);
> > +
> > +   return buf;
> > +}
> > +
> > +#define virtnet_rq_get_buf(rq, plen, pctx) \
> > +({ \
> > +   typeof(rq) _rq = (rq); \
> > +   virtnet_get_buf_ctx(_rq->vq, _rq->premapped, plen, pctx); \
> > +})
> > +
> > +#define virtnet_sq_get_buf(sq, plen, pctx) \
> > +({ \
> > +   typeof(sq) _sq = (sq); \
> > +   virtnet_get_buf_ctx(_sq->vq, _sq->premapped, plen, pctx); \
> > +})
> > +
> > +static int virtnet_add_sg(struct virtqueue *vq, bool premapped,
> > + struct scatterlist *sg, unsigned int num, bool out,
> > + void *data, void *ctx, gfp_t gfp)
> > +{
> > +   enum dma_data_direction dir;
> > +   struct device *dev;
> > +   int err, ret;
> > +
> > +   if (!premapped)
> > +   return virtqueue_ad