Hi,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coque...@redhat.com>
> Sent: Monday, January 11, 2021 10:25 PM
> To: Jiang, Cheng1 <cheng1.ji...@intel.com>; Xia, Chenbo
> <chenbo....@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu...@intel.com>; Yang, YvonneX
> <yvonnex.y...@intel.com>; Wang, Yinan <yinan.w...@intel.com>
> Subject: Re: [PATCH v8 2/2] examples/vhost: refactor vhost data path
> 
> 
> 
> On 1/11/21 6:52 AM, Cheng Jiang wrote:
> > Change the vm2vm data path to batch enqueue for better performance.
> > Support latest async vhost API, refactor vhost async data path,
> > replace rte_atomicNN_xxx with atomic_XXX and clean up some code.
> 
> Wouldn't it be better to use GCC/Clang C11 atomic built-ins, as all other
> code is being migrated to them? (i.e. __atomic_XXX)

Sure, it will be fixed in the next version.
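
For reference, a rough sketch of what that could look like, using the
nr_async_pkts and stats counters from this patch (the __ATOMIC_SEQ_CST
ordering below is an illustrative placeholder, to be settled in the
respin):

    /* <stdatomic.h> style currently used in this patch: */
    atomic_fetch_add(&vdev->nr_async_pkts, enqueue_count - cpu_cpl_nr);

    /* GCC/Clang C11 built-in style: */
    __atomic_fetch_add(&vdev->nr_async_pkts,
                    enqueue_count - cpu_cpl_nr, __ATOMIC_SEQ_CST);
    __atomic_fetch_sub(&vdev->nr_async_pkts,
                    complete_count, __ATOMIC_SEQ_CST);
    rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
                    __ATOMIC_SEQ_CST);

With the built-ins, the counters can stay plain integer types
(uint16_t/uint64_t) instead of the atomic_int_least16_t/
atomic_int_least64_t typedefs this version adds to main.h.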

Thanks,
Cheng

> 
> > Signed-off-by: Cheng Jiang <cheng1.ji...@intel.com>
> > Reviewed-by: Jiayu Hu <jiayu...@intel.com>
> > ---
> >  examples/vhost/ioat.h |   2 +-
> >  examples/vhost/main.c | 226 ++++++++++++++++++++++++++++++------------
> >  examples/vhost/main.h |   7 +-
> >  3 files changed, 168 insertions(+), 67 deletions(-)
> >
> > diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
> > index d6e1e2e07..0a1dbb811 100644
> > --- a/examples/vhost/ioat.h
> > +++ b/examples/vhost/ioat.h
> > @@ -11,7 +11,7 @@
> >
> >  #define MAX_VHOST_DEVICE 1024
> >  #define IOAT_RING_SIZE 4096
> > -#define MAX_ENQUEUED_SIZE 256
> > +#define MAX_ENQUEUED_SIZE 512
> >
> >  struct dma_info {
> >     struct rte_pci_addr addr;
> > diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> > index 22309977c..45976c93c 100644
> > --- a/examples/vhost/main.c
> > +++ b/examples/vhost/main.c
> > @@ -179,9 +179,22 @@ struct mbuf_table {
> >     struct rte_mbuf *m_table[MAX_PKT_BURST];
> >  };
> >
> > +struct vhost_bufftable {
> > +   uint32_t len;
> > +   uint64_t pre_tsc;
> > +   struct rte_mbuf *m_table[MAX_PKT_BURST];
> > +};
> > +
> >  /* TX queue for each data core. */
> >  struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
> >
> > +/*
> > + * Vhost TX buffer for each data core.
> > + * Every data core maintains a TX buffer for every vhost device,
> > + * which is used for batch pkts enqueue for higher performance.
> > + */
> > +struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * MAX_VHOST_DEVICE];
> > +
> >  #define MBUF_TABLE_DRAIN_TSC       ((rte_get_tsc_hz() + US_PER_S - 1) \
> >                              / US_PER_S * BURST_TX_DRAIN_US)
> >  #define VLAN_HLEN       4
> > @@ -804,43 +817,112 @@ unlink_vmdq(struct vhost_dev *vdev)
> >     }
> >  }
> >
> > +static inline void
> > +free_pkts(struct rte_mbuf **pkts, uint16_t n)
> > +{
> > +   while (n--)
> > +           rte_pktmbuf_free(pkts[n]);
> > +}
> > +
> >  static __rte_always_inline void
> > -virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
> > +complete_async_pkts(struct vhost_dev *vdev)
> > +{
> > +   struct rte_mbuf *p_cpl[MAX_PKT_BURST];
> > +   uint16_t complete_count;
> > +
> > +   complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
> > +                                   VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
> > +   if (complete_count) {
> > +           atomic_fetch_sub(&vdev->nr_async_pkts, complete_count);
> > +           free_pkts(p_cpl, complete_count);
> > +   }
> > +}
> > +
> > +static __rte_always_inline void
> > +sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
> >         struct rte_mbuf *m)
> >  {
> >     uint16_t ret;
> > -   struct rte_mbuf *m_cpl[1], *comp_pkt;
> > -   uint32_t nr_comp = 0;
> >
> >     if (builtin_net_driver) {
> >             ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
> > -   } else if (async_vhost_driver) {
> > -           ret = rte_vhost_submit_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ,
> > -                                           &m, 1, &comp_pkt, &nr_comp);
> > -           if (nr_comp == 1)
> > -                   goto done;
> > -
> > -           if (likely(ret))
> > -                   dst_vdev->nr_async_pkts++;
> > -
> > -           while (likely(dst_vdev->nr_async_pkts)) {
> > -                   if (rte_vhost_poll_enqueue_completed(dst_vdev->vid,
> > -                                   VIRTIO_RXQ, m_cpl, 1))
> > -                           dst_vdev->nr_async_pkts--;
> > -           }
> >     } else {
> >             ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
> >     }
> >
> > -done:
> >     if (enable_stats) {
> > -           rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
> > -           rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
> > +           atomic_fetch_add(&dst_vdev->stats.rx_total_atomic, 1);
> > +           atomic_fetch_add(&dst_vdev->stats.rx_atomic, ret);
> >             src_vdev->stats.tx_total++;
> >             src_vdev->stats.tx += ret;
> >     }
> >  }
> >
> > +static __rte_always_inline void
> > +drain_vhost(struct vhost_dev *vdev)
> > +{
> > +   uint16_t ret;
> > +   uint64_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid;
> > +   uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
> > +   struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
> > +
> > +   if (builtin_net_driver) {
> > +           ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
> > +   } else if (async_vhost_driver) {
> > +           uint32_t cpu_cpl_nr = 0;
> > +           uint16_t enqueue_fail = 0;
> > +           struct rte_mbuf *m_cpu_cpl[nr_xmit];
> > +
> > +           complete_async_pkts(vdev);
> > +           ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
> > +                                   m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
> > +           atomic_fetch_add(&vdev->nr_async_pkts, ret - cpu_cpl_nr);
> > +
> > +           if (cpu_cpl_nr)
> > +                   free_pkts(m_cpu_cpl, cpu_cpl_nr);
> > +
> > +           enqueue_fail = nr_xmit - ret;
> > +           if (enqueue_fail)
> > +                   free_pkts(&m[ret], nr_xmit - ret);
> > +   } else {
> > +           ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
> > +                                           m, nr_xmit);
> > +   }
> > +
> > +   if (enable_stats) {
> > +           atomic_fetch_add(&vdev->stats.rx_total_atomic, nr_xmit);
> > +           atomic_fetch_add(&vdev->stats.rx_atomic, ret);
> > +   }
> > +
> > +   if (!async_vhost_driver)
> > +           free_pkts(m, nr_xmit);
> > +}
> > +
> > +static __rte_always_inline void
> > +drain_vhost_table(void)
> > +{
> > +   uint16_t lcore_id = rte_lcore_id();
> > +   struct vhost_bufftable *vhost_txq;
> > +   struct vhost_dev *vdev;
> > +   uint64_t cur_tsc;
> > +
> > +   TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
> > +           vhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE
> > +                                           + vdev->vid];
> > +
> > +           cur_tsc = rte_rdtsc();
> > +           if (unlikely(cur_tsc - vhost_txq->pre_tsc
> > +                           > MBUF_TABLE_DRAIN_TSC)) {
> > +                   RTE_LOG_DP(DEBUG, VHOST_DATA,
> > +                           "Vhost TX queue drained after timeout with burst size %u\n",
> > +                           vhost_txq->len);
> > +                   drain_vhost(vdev);
> > +                   vhost_txq->len = 0;
> > +                   vhost_txq->pre_tsc = cur_tsc;
> > +           }
> > +   }
> > +}
> > +
> >  /*
> >   * Check if the packet destination MAC address is for a local device. If so then put
> >   * the packet on that device's RX queue. If not then return.
> > @@ -850,7 +932,8 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
> >  {
> >     struct rte_ether_hdr *pkt_hdr;
> >     struct vhost_dev *dst_vdev;
> > -
> > +   struct vhost_bufftable *vhost_txq;
> > +   uint16_t lcore_id = rte_lcore_id();
> >     pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
> >
> >     dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
> > @@ -873,7 +956,19 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
> >             return 0;
> >     }
> >
> > -   virtio_xmit(dst_vdev, vdev, m);
> > +   vhost_txq = vhost_txbuff[lcore_id * MAX_VHOST_DEVICE + dst_vdev->vid];
> > +   vhost_txq->m_table[vhost_txq->len++] = m;
> > +
> > +   if (enable_stats) {
> > +           vdev->stats.tx_total++;
> > +           vdev->stats.tx++;
> > +   }
> > +
> > +   if (unlikely(vhost_txq->len == MAX_PKT_BURST)) {
> > +           drain_vhost(dst_vdev);
> > +           vhost_txq->len = 0;
> > +           vhost_txq->pre_tsc = rte_rdtsc();
> > +   }
> >     return 0;
> >  }
> >
> > @@ -944,13 +1039,6 @@ static void virtio_tx_offload(struct rte_mbuf *m)
> >     tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
> >  }
> >
> > -static inline void
> > -free_pkts(struct rte_mbuf **pkts, uint16_t n)
> > -{
> > -   while (n--)
> > -           rte_pktmbuf_free(pkts[n]);
> > -}
> > -
> >  static __rte_always_inline void
> >  do_drain_mbuf_table(struct mbuf_table *tx_q)
> >  {
> > @@ -983,16 +1071,14 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
> >
> >             TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
> >                     if (vdev2 != vdev)
> > -                           virtio_xmit(vdev2, vdev, m);
> > +                           sync_virtio_xmit(vdev2, vdev, m);
> >             }
> >             goto queue2nic;
> >     }
> >
> >     /*check if destination is local VM*/
> > -   if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
> > -           rte_pktmbuf_free(m);
> > +   if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0))
> >             return;
> > -   }
> >
> >     if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
> >             if (unlikely(find_local_dest(vdev, m, &offset,
> > @@ -1077,32 +1163,15 @@ drain_mbuf_table(struct mbuf_table *tx_q)
> >     }
> >  }
> >
> > -static __rte_always_inline void
> > -complete_async_pkts(struct vhost_dev *vdev, uint16_t qid)
> > -{
> > -   struct rte_mbuf *p_cpl[MAX_PKT_BURST];
> > -   uint16_t complete_count;
> > -
> > -   complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
> > -                                           qid, p_cpl, MAX_PKT_BURST);
> > -   vdev->nr_async_pkts -= complete_count;
> > -   if (complete_count)
> > -           free_pkts(p_cpl, complete_count);
> > -}
> > -
> >  static __rte_always_inline void
> >  drain_eth_rx(struct vhost_dev *vdev)
> >  {
> >     uint16_t rx_count, enqueue_count;
> > -   struct rte_mbuf *pkts[MAX_PKT_BURST], *comp_pkts[MAX_PKT_BURST];
> > -   uint32_t nr_comp = 0;
> > +   struct rte_mbuf *pkts[MAX_PKT_BURST];
> >
> >     rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
> >                                 pkts, MAX_PKT_BURST);
> >
> > -   while (likely(vdev->nr_async_pkts))
> > -           complete_async_pkts(vdev, VIRTIO_RXQ);
> > -
> >     if (!rx_count)
> >             return;
> >
> > @@ -1128,22 +1197,31 @@ drain_eth_rx(struct vhost_dev *vdev)
> >             enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
> >                                             pkts, rx_count);
> >     } else if (async_vhost_driver) {
> > +           uint32_t cpu_cpl_nr = 0;
> > +           uint16_t enqueue_fail = 0;
> > +           struct rte_mbuf *m_cpu_cpl[MAX_PKT_BURST];
> > +
> > +           complete_async_pkts(vdev);
> >             enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
> > -                                   VIRTIO_RXQ, pkts, rx_count, comp_pkts,
> > -                                   &nr_comp);
> > -           if (nr_comp > 0) {
> > -                   free_pkts(comp_pkts, nr_comp);
> > -                   enqueue_count -= nr_comp;
> > -           }
> > -           vdev->nr_async_pkts += enqueue_count;
> > +                                   VIRTIO_RXQ, pkts, rx_count,
> > +                                   m_cpu_cpl, &cpu_cpl_nr);
> > +           atomic_fetch_add(&vdev->nr_async_pkts,
> > +                                   enqueue_count - cpu_cpl_nr);
> > +           if (cpu_cpl_nr)
> > +                   free_pkts(m_cpu_cpl, cpu_cpl_nr);
> > +
> > +           enqueue_fail = rx_count - enqueue_count;
> > +           if (enqueue_fail)
> > +                   free_pkts(&pkts[enqueue_count], enqueue_fail);
> > +
> >     } else {
> >             enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
> >                                             pkts, rx_count);
> >     }
> >
> >     if (enable_stats) {
> > -           rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
> > -           rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
> > +           atomic_fetch_add(&vdev->stats.rx_total_atomic, rx_count);
> > +           atomic_fetch_add(&vdev->stats.rx_atomic, enqueue_count);
> >     }
> >
> >     if (!async_vhost_driver)
> > @@ -1212,7 +1290,7 @@ switch_worker(void *arg __rte_unused)
> >
> >     while(1) {
> >             drain_mbuf_table(tx_q);
> > -
> > +           drain_vhost_table();
> >             /*
> >              * Inform the configuration core that we have exited the
> >              * linked list and that no devices are in use if requested.
> > @@ -1253,6 +1331,7 @@ destroy_device(int vid)
> >  {
> >     struct vhost_dev *vdev = NULL;
> >     int lcore;
> > +   uint16_t i;
> >
> >     TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
> >             if (vdev->vid == vid)
> > @@ -1266,6 +1345,9 @@ destroy_device(int vid)
> >             rte_pause();
> >     }
> >
> > +   for (i = 0; i < RTE_MAX_LCORE; i++)
> > +           rte_free(vhost_txbuff[i * MAX_VHOST_DEVICE + vid]);
> > +
> >     if (builtin_net_driver)
> >             vs_vhost_net_remove(vdev);
> >
> > @@ -1308,6 +1390,7 @@ static int
> >  new_device(int vid)
> >  {
> >     int lcore, core_add = 0;
> > +   uint16_t i;
> >     uint32_t device_num_min = num_devices;
> >     struct vhost_dev *vdev;
> >     vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
> > @@ -1319,6 +1402,19 @@ new_device(int vid)
> >     }
> >     vdev->vid = vid;
> >
> > +   for (i = 0; i < RTE_MAX_LCORE; i++) {
> > +           vhost_txbuff[i * MAX_VHOST_DEVICE + vid]
> > +                   = rte_zmalloc("vhost bufftable",
> > +                           sizeof(struct vhost_bufftable),
> > +                           RTE_CACHE_LINE_SIZE);
> > +
> > +           if (vhost_txbuff[i * MAX_VHOST_DEVICE + vid] == NULL) {
> > +                   RTE_LOG(INFO, VHOST_DATA,
> > +                     "(%d) couldn't allocate memory for vhost TX\n", vid);
> > +                   return -1;
> > +           }
> > +   }
> > +
> >     if (builtin_net_driver)
> >             vs_vhost_net_setup(vdev);
> >
> > @@ -1353,12 +1449,15 @@ new_device(int vid)
> >     if (async_vhost_driver) {
> >             struct rte_vhost_async_features f;
> >             struct rte_vhost_async_channel_ops channel_ops;
> > +
> >             if (strncmp(dma_type, "ioat", 4) == 0) {
> >                     channel_ops.transfer_data = ioat_transfer_data_cb;
> >                     channel_ops.check_completed_copies =
> >                             ioat_check_completed_copies_cb;
> > +
> >                     f.async_inorder = 1;
> >                     f.async_threshold = 256;
> > +
> >                     return rte_vhost_async_channel_register(vid,
> VIRTIO_RXQ,
> >                             f.intval, &channel_ops);
> >             }
> > @@ -1402,8 +1501,8 @@ print_stats(__rte_unused void *arg)
> >                     tx         = vdev->stats.tx;
> >                     tx_dropped = tx_total - tx;
> >
> > -                   rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
> > -                   rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
> > +                   rx_total   = atomic_load(&vdev->stats.rx_total_atomic);
> > +                   rx         = atomic_load(&vdev->stats.rx_atomic);
> >                     rx_dropped = rx_total - rx;
> >
> >                     printf("Statistics for device %d\n"
> > @@ -1602,6 +1701,7 @@ main(int argc, char *argv[])
> >     /* Register vhost user driver to handle vhost messages. */
> >     for (i = 0; i < nb_sockets; i++) {
> >             char *file = socket_files + i * PATH_MAX;
> > +
> >             if (async_vhost_driver)
> >                     flags = flags | RTE_VHOST_USER_ASYNC_COPY;
> >
> > diff --git a/examples/vhost/main.h b/examples/vhost/main.h index
> > 4317b6ae8..6aa798a3e 100644
> > --- a/examples/vhost/main.h
> > +++ b/examples/vhost/main.h
> > @@ -8,6 +8,7 @@
> >  #include <sys/queue.h>
> >
> >  #include <rte_ether.h>
> > +#include <stdatomic.h>
> >
> >  /* Macros for printing using RTE_LOG */
> >  #define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
> > @@ -21,8 +22,8 @@ enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
> >  struct device_statistics {
> >     uint64_t        tx;
> >     uint64_t        tx_total;
> > -   rte_atomic64_t  rx_atomic;
> > -   rte_atomic64_t  rx_total_atomic;
> > +   atomic_int_least64_t    rx_atomic;
> > +   atomic_int_least64_t    rx_total_atomic;
> >  };
> >
> >  struct vhost_queue {
> > @@ -51,7 +52,7 @@ struct vhost_dev {
> >     uint64_t features;
> >     size_t hdr_len;
> >     uint16_t nr_vrings;
> > -   uint16_t nr_async_pkts;
> > +   atomic_int_least16_t nr_async_pkts;
> >     struct rte_vhost_memory *mem;
> >     struct device_statistics stats;
> >     TAILQ_ENTRY(vhost_dev) global_vdev_entry;
> > --
> > 2.29.2
> >
