On Fri, Sep 06, 2024 at 06:02:50PM +0800, Xuan Zhuo wrote:
> On Fri, 6 Sep 2024 05:52:36 -0400, "Michael S. Tsirkin" <m...@redhat.com> wrote:
> > This reverts commit defd28aa5acb0fd7c15adc6bc40a8ac277d04dea.
> >
> > The reverted commit leads to crashes without ACCESS_PLATFORM when
> > sysctl net.core.high_order_alloc_disable=1 is set.
> >
> > Cc: Xuan Zhuo <xuanz...@linux.alibaba.com>
> > Reported-by: Si-Wei Liu <si-wei....@oracle.com>
> > Signed-off-by: Michael S. Tsirkin <m...@redhat.com>
> > ---
> >  drivers/net/virtio_net.c | 89 +++++++++++++++++++++++-----------------
> >  1 file changed, 52 insertions(+), 37 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 0944430dfb1f..0a2ec9570521 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -348,6 +348,9 @@ struct receive_queue {
> >
> >     /* Record the last dma info to free after new pages are allocated. */
> >     struct virtnet_rq_dma *last_dma;
> > +
> > +   /* Do DMA mapping ourselves (premapped mode) */
> > +   bool do_dma;
> >  };
> >
> >  /* This structure can contain rss message with maximum settings for indirection table and keysize
> > @@ -867,7 +870,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
> >     void *buf;
> >
> >     buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
> > -   if (buf)
> > +   if (buf && rq->do_dma)
> >             virtnet_rq_unmap(rq, buf, *len);
> >
> >     return buf;
> > @@ -880,6 +883,11 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
> >     u32 offset;
> >     void *head;
> >
> > +   if (!rq->do_dma) {
> > +           sg_init_one(rq->sg, buf, len);
> > +           return;
> > +   }
> > +
> >     head = page_address(rq->alloc_frag.page);
> >
> >     offset = buf - head;
> > @@ -905,42 +913,44 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
> >
> >     head = page_address(alloc_frag->page);
> >
> > -   dma = head;
> > +   if (rq->do_dma) {
> > +           dma = head;
> >
> > -   /* new pages */
> > -   if (!alloc_frag->offset) {
> > -           if (rq->last_dma) {
> > -                   /* Now, the new page is allocated, the last dma
> > -                    * will not be used. So the dma can be unmapped
> > -                    * if the ref is 0.
> > +           /* new pages */
> > +           if (!alloc_frag->offset) {
> > +                   if (rq->last_dma) {
> > +                           /* Now, the new page is allocated, the last dma
> > +                            * will not be used. So the dma can be unmapped
> > +                            * if the ref is 0.
> > +                            */
> > +                           virtnet_rq_unmap(rq, rq->last_dma, 0);
> > +                           rq->last_dma = NULL;
> > +                   }
> > +
> > +                   dma->len = alloc_frag->size - sizeof(*dma);
> > +
> > +                   addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
> > +                                                         dma->len, DMA_FROM_DEVICE, 0);
> > +                   if (virtqueue_dma_mapping_error(rq->vq, addr))
> > +                           return NULL;
> > +
> > +                   dma->addr = addr;
> > +                   dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
> > +
> > +                   /* Add a reference to dma to prevent the entire dma from
> > +                    * being released during error handling. This reference
> > +                    * will be freed after the pages are no longer used.
> >                      */
> > -                   virtnet_rq_unmap(rq, rq->last_dma, 0);
> > -                   rq->last_dma = NULL;
> > +                   get_page(alloc_frag->page);
> > +                   dma->ref = 1;
> > +                   alloc_frag->offset = sizeof(*dma);
> > +
> > +                   rq->last_dma = dma;
> >             }
> >
> > -           dma->len = alloc_frag->size - sizeof(*dma);
> > -
> > -           addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
> > -                                                 dma->len, DMA_FROM_DEVICE, 0);
> > -           if (virtqueue_dma_mapping_error(rq->vq, addr))
> > -                   return NULL;
> > -
> > -           dma->addr = addr;
> > -           dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
> > -
> > -           /* Add a reference to dma to prevent the entire dma from
> > -            * being released during error handling. This reference
> > -            * will be freed after the pages are no longer used.
> > -            */
> > -           get_page(alloc_frag->page);
> > -           dma->ref = 1;
> > -           alloc_frag->offset = sizeof(*dma);
> > -
> > -           rq->last_dma = dma;
> > +           ++dma->ref;
> >     }
> >
> > -   ++dma->ref;
> > -
> >     buf = head + alloc_frag->offset;
> >
> >     get_page(alloc_frag->page);
> > @@ -957,9 +967,12 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
> >     if (!vi->mergeable_rx_bufs && vi->big_packets)
> >             return;
> >
> > -   for (i = 0; i < vi->max_queue_pairs; i++)
> > -           /* error should never happen */
> > -           BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
> > +   for (i = 0; i < vi->max_queue_pairs; i++) {
> > +           if (virtqueue_set_dma_premapped(vi->rq[i].vq))
> > +                   continue;
> > +
> > +           vi->rq[i].do_dma = true;
> > +   }
> 
> This is too much code to revert. We can just revert this commit and the
> next one, and add a patch that turns off the default premapped setting
> (returning from this function directly). Otherwise, we will have to redo
> all of this work in the future.
> 
> There is no need to revert the xsk-related code: the xsk functionality
> cannot be enabled when premapped mode is not turned on, so there is no
> direct impact.
> 
> Thanks.

I tried, but quickly got lost because the automatic
revert did not work, and it's very close to the release, so
I wanted to be sure it's right.

Please post your own version of the revert for testing, then.
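
If it helps, here is a rough, untested sketch of the "turn premapped
off by default" step as I understand it, applied on top of the partial
reverts that restore do_dma (the early return is the whole change; the
rest of the function body is left untouched):

	static void virtnet_rq_set_premapped(struct virtnet_info *vi)
	{
		/* Keep premapped mode off by default for now: with this
		 * early return, virtqueue_set_dma_premapped() is never
		 * called, so no rq ends up with do_dma set and the
		 * driver takes the non-premapped paths everywhere.
		 */
		return;

		/* ... existing body unchanged ... */
	}

Flipping the default back on later would then be a one-line change.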


> 
> >  }
> >
> >  static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
> > @@ -2107,7 +2120,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
> >
> >     err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
> >     if (err < 0) {
> > -           virtnet_rq_unmap(rq, buf, 0);
> > +           if (rq->do_dma)
> > +                   virtnet_rq_unmap(rq, buf, 0);
> >             put_page(virt_to_head_page(buf));
> >     }
> >
> > @@ -2221,7 +2235,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
> >     ctx = mergeable_len_to_ctx(len + room, headroom);
> >     err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
> >     if (err < 0) {
> > -           virtnet_rq_unmap(rq, buf, 0);
> > +           if (rq->do_dma)
> > +                   virtnet_rq_unmap(rq, buf, 0);
> >             put_page(virt_to_head_page(buf));
> >     }
> >
> > @@ -5392,7 +5407,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
> >     int i;
> >     for (i = 0; i < vi->max_queue_pairs; i++)
> >             if (vi->rq[i].alloc_frag.page) {
> > -                   if (vi->rq[i].last_dma)
> > +                   if (vi->rq[i].do_dma && vi->rq[i].last_dma)
> >                             virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
> >                     put_page(vi->rq[i].alloc_frag.page);
> >             }
> > --
> > MST
> >

