Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coque...@redhat.com>
> Sent: Monday, December 21, 2020 5:14 AM
> To: dev@dpdk.org; Xia, Chenbo <chenbo....@intel.com>; olivier.m...@6wind.com;
> amore...@redhat.com; david.march...@redhat.com
> Cc: Maxime Coquelin <maxime.coque...@redhat.com>
> Subject: [PATCH 08/40] net/virtio: force IOVA as VA mode for Virtio-user
> 
> At least Vhost-user backend of Virtio-user PMD requires
> IOVA as VA mode. Until now, it was implemented as a hack
> by forcing to use mbuf's buf_addr field instead of buf_iova.
> 
> This patcv removes all this logic and just fails probing

s/patcv/patch

With this fix:

Reviewed-by: Chenbo Xia <chenbo....@intel.com>

> if IOVA as VA mode is not selected. It simplifies the
> code overall, and removes some bus-specific logic from
> generic virtio_ethdev.c.
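
To spell out for other readers why dropping the buf_addr hack is safe (a minimal illustration from my side, not part of the patch): once the EAL runs in IOVA-as-VA mode, the mempool fills each mbuf's buf_iova with the buffer's virtual address, so the generic mbuf helper already hands the vhost-user backend the same address the removed virtio-user special case computed:

    /* Reviewer sketch only, assuming --iova-mode=va is in effect;
     * the helper name is made up for the example. */
    #include <rte_mbuf.h>

    static inline rte_iova_t
    virtio_user_desc_addr(const struct rte_mbuf *mb)
    {
            /* In VA mode buf_iova == (uintptr_t)buf_addr, so this equals
             * what the old VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) returned:
             * buf_iova + data_off.
             */
            return rte_mbuf_data_iova(mb);
    }

And if the buses would otherwise select PA, VA mode can still be requested explicitly with the EAL option --iova-mode=va.
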
> 
> Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c          | 15 ---------
>  drivers/net/virtio/virtio_rxtx.c            | 34 +++++++++------------
>  drivers/net/virtio/virtio_rxtx_packed_avx.c | 10 +++---
>  drivers/net/virtio/virtio_rxtx_simple.h     |  3 +-
>  drivers/net/virtio/virtio_user_ethdev.c     | 11 +++++++
>  drivers/net/virtio/virtqueue.h              | 25 +--------------
>  6 files changed, 32 insertions(+), 66 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index 67f6be3fa8..13e2ec998a 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -576,21 +576,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
>               hw->cvq = cvq;
>       }
> 
> -     /* For virtio_user case (that is when hw->virtio_user_dev is not NULL),
> -      * we use virtual address. And we need properly set _offset_, please see
> -      * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
> -      */
> -     if (hw->bus_type == VIRTIO_BUS_PCI_LEGACY || hw->bus_type == VIRTIO_BUS_PCI_MODERN) {
> -             vq->offset = offsetof(struct rte_mbuf, buf_iova);
> -     } else if (hw->bus_type == VIRTIO_BUS_USER) {
> -             vq->vq_ring_mem = (uintptr_t)mz->addr;
> -             vq->offset = offsetof(struct rte_mbuf, buf_addr);
> -             if (queue_type == VTNET_TQ)
> -                     txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
> -             else if (queue_type == VTNET_CQ)
> -                     cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
> -     }
> -
>       if (queue_type == VTNET_TQ) {
>               struct virtio_tx_region *txr;
>               unsigned int i;
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 77934e8c58..93fe856cbd 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -271,13 +271,10 @@ virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
>               dxp->cookie = (void *)cookies[i];
>               dxp->ndescs = 1;
> 
> -             start_dp[idx].addr =
> -                             VIRTIO_MBUF_ADDR(cookies[i], vq) +
> -                             RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> -             start_dp[idx].len =
> -                             cookies[i]->buf_len -
> -                             RTE_PKTMBUF_HEADROOM +
> -                             hw->vtnet_hdr_size;
> +             start_dp[idx].addr = cookies[i]->buf_iova +
> +                     RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> +             start_dp[idx].len = cookies[i]->buf_len -
> +                     RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
>               start_dp[idx].flags =  VRING_DESC_F_WRITE;
> 
>               vq_update_avail_ring(vq, idx);
> @@ -313,12 +310,10 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
>               dxp->cookie = (void *)cookie[i];
>               dxp->ndescs = 1;
> 
> -             start_dp[idx].addr =
> -                     VIRTIO_MBUF_ADDR(cookie[i], vq) +
> +             start_dp[idx].addr = cookie[i]->buf_iova +
>                       RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> -             start_dp[idx].len =
> -                     cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
> -                     hw->vtnet_hdr_size;
> +             start_dp[idx].len = cookie[i]->buf_len -
> +                     RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
>               start_dp[idx].flags = VRING_DESC_F_WRITE;
>               vq->vq_desc_head_idx = start_dp[idx].next;
>               vq_update_avail_ring(vq, idx);
> @@ -355,10 +350,10 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
>               dxp->cookie = (void *)cookie[i];
>               dxp->ndescs = 1;
> 
> -             start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
> -                             RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> -             start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
> -                                     + hw->vtnet_hdr_size;
> +             start_dp[idx].addr = cookie[i]->buf_iova +
> +                     RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> +             start_dp[idx].len = cookie[i]->buf_len -
> +                     RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
> 
>               vq->vq_desc_head_idx = dxp->next;
>               if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
> @@ -455,8 +450,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
>               else
>                       virtqueue_xmit_offload(hdr, cookies[i], true);
> 
> -             start_dp[idx].addr  =
> -                     VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
> +             start_dp[idx].addr  = rte_mbuf_data_iova(cookies[i]) - head_size;
>               start_dp[idx].len   = cookies[i]->data_len + head_size;
>               start_dp[idx].flags = 0;
> 
> @@ -503,7 +497,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
>       else
>               virtqueue_xmit_offload(hdr, cookie, true);
> 
> -     dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
> +     dp->addr = rte_mbuf_data_iova(cookie) - head_size;
>       dp->len  = cookie->data_len + head_size;
>       dp->id   = id;
> 
> @@ -590,7 +584,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
>       virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
> 
>       do {
> -             start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
> +             start_dp[idx].addr  = rte_mbuf_data_iova(cookie);
>               start_dp[idx].len   = cookie->data_len;
>               if (prepend_header) {
>                       start_dp[idx].addr -= head_size;
> diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.c b/drivers/net/virtio/virtio_rxtx_packed_avx.c
> index 9bc62719ee..a6a49ec439 100644
> --- a/drivers/net/virtio/virtio_rxtx_packed_avx.c
> +++ b/drivers/net/virtio/virtio_rxtx_packed_avx.c
> @@ -133,13 +133,13 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
>       }
> 
>       __m512i descs_base = _mm512_set_epi64(tx_pkts[3]->data_len,
> -                     VIRTIO_MBUF_ADDR(tx_pkts[3], vq),
> +                     tx_pkts[3]->buf_iova,
>                       tx_pkts[2]->data_len,
> -                     VIRTIO_MBUF_ADDR(tx_pkts[2], vq),
> +                     tx_pkts[2]->buf_iova,
>                       tx_pkts[1]->data_len,
> -                     VIRTIO_MBUF_ADDR(tx_pkts[1], vq),
> +                     tx_pkts[1]->buf_iova,
>                       tx_pkts[0]->data_len,
> -                     VIRTIO_MBUF_ADDR(tx_pkts[0], vq));
> +                     tx_pkts[0]->buf_iova);
> 
>       /* id offset and data offset */
>       __m512i data_offsets = _mm512_set_epi64((uint64_t)3 << ID_BITS_OFFSET,
> @@ -536,7 +536,7 @@ virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,
>                       dxp = &vq->vq_descx[idx + i];
>                       dxp->cookie = (void *)cookie[total_num + i];
> 
> -                     addr = VIRTIO_MBUF_ADDR(cookie[total_num + i], vq) +
> +                     addr = cookie[total_num + i]->buf_iova +
>                               RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
>                       start_dp[idx + i].addr = addr;
>                       start_dp[idx + i].len = cookie[total_num + i]->buf_len
> diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
> index 3d1296a23c..f2a5aedf97 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple.h
> +++ b/drivers/net/virtio/virtio_rxtx_simple.h
> @@ -43,8 +43,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
>               p = (uintptr_t)&sw_ring[i]->rearm_data;
>               *(uint64_t *)p = rxvq->mbuf_initializer;
> 
> -             start_dp[i].addr =
> -                     VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
> +             start_dp[i].addr = sw_ring[i]->buf_iova +
>                       RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
>               start_dp[i].len = sw_ring[i]->buf_len -
>                       RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
> diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
> index 1f1f63a1a5..f4775ff141 100644
> --- a/drivers/net/virtio/virtio_user_ethdev.c
> +++ b/drivers/net/virtio/virtio_user_ethdev.c
> @@ -663,6 +663,17 @@ virtio_user_pmd_probe(struct rte_vdev_device *vdev)
>       char *mac_addr = NULL;
>       int ret = -1;
> 
> +     /*
> +      * ToDo 1: Implement detection mechanism at vdev bus level as PCI, but
> +      * it implies API breakage.
> +      * ToDo 2: Check if all backends have this requirement. Likely
> +      * Vhost-vDPA and Vhost-Kernel are fine with PA IOVA mode.
> +      */
> +     if (rte_eal_iova_mode() != RTE_IOVA_VA) {
> +             PMD_INIT_LOG(ERR, "Probing failed, only VA IOVA mode supported\n");
> +             return -1;
> +     }
> +
>       if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
>               const char *name = rte_vdev_device_name(vdev);
>               eth_dev = rte_eth_dev_attach_secondary(name);
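
On the new check above (a hedged note, not a request for changes): applications that hot-plug a virtio-user port can also verify the IOVA mode up front instead of relying on this probe-time error. Something along these lines, with an illustrative socket path:

    /* Reviewer sketch only; the devargs string is made up for the example. */
    #include <errno.h>
    #include <rte_eal.h>
    #include <rte_dev.h>

    static int
    attach_virtio_user_port(void)
    {
            if (rte_eal_iova_mode() != RTE_IOVA_VA)
                    return -ENOTSUP; /* virtio-user now requires VA mode */

            return rte_dev_probe("virtio_user0,path=/tmp/vhost-user0.sock");
    }
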
> diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
> index 42c4c9882f..e4a1393816 100644
> --- a/drivers/net/virtio/virtqueue.h
> +++ b/drivers/net/virtio/virtqueue.h
> @@ -114,29 +114,6 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp,
> 
>  #define VIRTQUEUE_MAX_NAME_SZ 32
> 
> -#ifdef RTE_VIRTIO_USER
> -/**
> - * Return the physical address (or virtual address in case of
> - * virtio-user) of mbuf data buffer.
> - *
> - * The address is firstly casted to the word size (sizeof(uintptr_t))
> - * before casting it to uint64_t. This is to make it work with different
> - * combination of word size (64 bit and 32 bit) and virtio device
> - * (virtio-pci and virtio-user).
> - */
> -#define VIRTIO_MBUF_ADDR(mb, vq) \
> -     ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
> -#else
> -#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova)
> -#endif
> -
> -/**
> - * Return the physical address (or virtual address in case of
> - * virtio-user) of mbuf data buffer, taking care of mbuf data offset
> - */
> -#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
> -     (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
> -
>  #define VTNET_SQ_RQ_QUEUE_IDX 0
>  #define VTNET_SQ_TQ_QUEUE_IDX 1
>  #define VTNET_SQ_CQ_QUEUE_IDX 2
> @@ -764,7 +741,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
>       do {
>               uint16_t flags;
> 
> -             start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
> +             start_dp[idx].addr = rte_mbuf_data_iova(cookie);
>               start_dp[idx].len  = cookie->data_len;
>               if (prepend_header) {
>                       start_dp[idx].addr -= head_size;
> --
> 2.29.2
