On Fri, 20 Dec 2024 11:49:55 +0800 Yunjian Wang <wangyunj...@huawei.com> wrote:
> The hdr->csum_start does two successive reads from user space to read a
> variable length data structure. The result can be inconsistent if the
> data structure changes between the two reads.
>
> To fix this, we can prevent the double fetch issue by copying the
> virtio_hdr to a temporary variable.
>
> Fixes: 4dc4e33ffa10 ("net/virtio: fix Rx checksum calculation")
> Cc: sta...@dpdk.org
>
> Signed-off-by: Yunjian Wang <wangyunj...@huawei.com>

How about something like the following *untested* patch:

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 69901ab3b5..c65cb639b2 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2861,25 +2861,28 @@ vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr,
 	}
 }
 
-static __rte_noinline void
+static inline int
 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
-		struct buf_vector *buf_vec)
+		const struct buf_vector *buf_vec,
+		uint16_t nr_vec)
 {
-	uint64_t len;
-	uint64_t remain = sizeof(struct virtio_net_hdr);
-	uint64_t src;
-	uint64_t dst = (uint64_t)(uintptr_t)hdr;
+	size_t remain = sizeof(struct virtio_net_hdr);
+	uint8_t *dst = (uint8_t *)hdr;
 
-	while (remain) {
-		len = RTE_MIN(remain, buf_vec->buf_len);
-		src = buf_vec->buf_addr;
-		rte_memcpy((void *)(uintptr_t)dst,
-				(void *)(uintptr_t)src, len);
+	while (remain > 0) {
+		size_t len = RTE_MIN(remain, buf_vec->buf_len);
+		const void *src = (const void *)(uintptr_t)buf_vec->buf_addr;
 
+		if (unlikely(nr_vec == 0))
+			return -1;
+
+		memcpy(dst, src, len);
 		remain -= len;
 		dst += len;
 		buf_vec++;
+		--nr_vec;
 	}
+
+	return 0;
 }
 
 static __rte_always_inline int
@@ -2908,16 +2911,12 @@ desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	 */
 
 	if (virtio_net_with_host_offload(dev)) {
-		if (unlikely(buf_vec[0].buf_len < sizeof(struct virtio_net_hdr))) {
-			/*
-			 * No luck, the virtio-net header doesn't fit
-			 * in a contiguous virtual area.
-			 */
-			copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
-			hdr = &tmp_hdr;
-		} else {
-			hdr = (struct virtio_net_hdr *)((uintptr_t)buf_vec[0].buf_addr);
-		}
+		if (unlikely(copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec, nr_vec) != 0))
+			return -1;
+
+		/* ensure that compiler does not delay copy */
+		rte_compiler_barrier();
+		hdr = &tmp_hdr;
 	}
 
 	for (vec_idx = 0; vec_idx < nr_vec; vec_idx++) {
@@ -3363,7 +3362,6 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 {
 	uint16_t avail_idx = vq->last_avail_idx;
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
-	struct virtio_net_hdr *hdr;
 	uintptr_t desc_addrs[PACKED_BATCH_SIZE];
 	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
@@ -3382,8 +3380,12 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 
 	if (virtio_net_with_host_offload(dev)) {
 		vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-			hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
-			vhost_dequeue_offload(dev, hdr, pkts[i], legacy_ol_flags);
+			struct virtio_net_hdr hdr;
+
+			memcpy(&hdr, (void *)desc_addrs[i], sizeof(struct virtio_net_hdr));
+			rte_compiler_barrier();
+
+			vhost_dequeue_offload(dev, &hdr, pkts[i], legacy_ol_flags);
 		}
 	}