Hi,

> -----Original Message-----
> From: Ma, WenwuX <wenwux...@intel.com>
> Sent: Saturday, September 18, 2021 3:27 AM
> To: dev@dpdk.org
> Cc: maxime.coque...@redhat.com; Xia, Chenbo <chenbo....@intel.com>;
> Jiang, Cheng1 <cheng1.ji...@intel.com>; Hu, Jiayu <jiayu...@intel.com>;
> Pai G, Sunil <sunil.pa...@intel.com>; Yang, YvonneX <yvonnex.y...@intel.com>;
> Ma, WenwuX <wenwux...@intel.com>
> Subject: [PATCH v2 2/4] examples/vhost: refactor vhost enqueue and
> dequeue datapaths
>
> Previously, by judging the flag, we call different enqueue/dequeue
> functions in data path.
>
> Now, we use an ops that was initialized when Vhost was created,
> so that we can call ops directly in Vhost data path without any more
> flag judgment.
>
> Signed-off-by: Wenwu Ma <wenwux...@intel.com>
> Reviewed-by: Maxime Coquelin <maxime.coque...@redhat.com>
> Tested-by: Yvonne Yang <yvonnex.y...@intel.com>
> ---
>  examples/vhost/main.c       | 100 +++++++++++++++++++++---------------
>  examples/vhost/main.h       |  28 ++++++++--
>  examples/vhost/virtio_net.c |  16 +++++-
>  3 files changed, 98 insertions(+), 46 deletions(-)
>
> diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> index d0bf1f31e3..254f7097bc 100644
> --- a/examples/vhost/main.c
> +++ b/examples/vhost/main.c
> @@ -106,6 +106,8 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
>  static char *socket_files;
>  static int nb_sockets;
>
> +static struct vhost_queue_ops vdev_queue_ops[MAX_VHOST_DEVICE];
> +
>  /* empty vmdq configuration structure. Filled in programatically */
>  static struct rte_eth_conf vmdq_conf_default = {
>          .rxmode = {
> @@ -879,22 +881,8 @@ drain_vhost(struct vhost_dev *vdev)
>          uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
>          struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
>
> -        if (builtin_net_driver) {
> -                ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
> -        } else if (async_vhost_driver) {
> -                uint16_t enqueue_fail = 0;
> -
> -                complete_async_pkts(vdev);
> -                ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit);
> -                __atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
> -
> -                enqueue_fail = nr_xmit - ret;
> -                if (enqueue_fail)
> -                        free_pkts(&m[ret], nr_xmit - ret);
> -        } else {
> -                ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
> -                                                m, nr_xmit);
> -        }
> +        ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
> +                                        VIRTIO_RXQ, m, nr_xmit);
>
Now the line character limit is 100, so you don't have to put it in 2 lines.

>          if (enable_stats) {
>                  __atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
> @@ -1173,6 +1161,33 @@ drain_mbuf_table(struct mbuf_table *tx_q)
>          }
>  }
>
> +uint16_t
> +async_enqueue_pkts(struct vhost_dev *vdev, uint16_t queue_id,
> +                struct rte_mbuf **pkts, uint32_t rx_count)
> +{
> +        uint16_t enqueue_count;
> +        uint16_t enqueue_fail = 0;
> +
> +        complete_async_pkts(vdev);
> +        enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
> +                                queue_id, pkts, rx_count);

Same here.

> +        __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count,
> +                        __ATOMIC_SEQ_CST);

Same here.

> +
> +        enqueue_fail = rx_count - enqueue_count;
> +        if (enqueue_fail)
> +                free_pkts(&pkts[enqueue_count], enqueue_fail);
> +
> +        return enqueue_count;
> +}
> +
> +uint16_t
> +sync_enqueue_pkts(struct vhost_dev *vdev, uint16_t queue_id,
> +                struct rte_mbuf **pkts, uint32_t rx_count)
> +{
> +        return rte_vhost_enqueue_burst(vdev->vid, queue_id, pkts, rx_count);
> +}
> +
>  static __rte_always_inline void
>  drain_eth_rx(struct vhost_dev *vdev)
>  {
> @@ -1203,25 +1218,8 @@ drain_eth_rx(struct vhost_dev *vdev)
>                  }
>          }
>
> -        if (builtin_net_driver) {
> -                enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
> -                                                pkts, rx_count);
> -        } else if (async_vhost_driver) {
> -                uint16_t enqueue_fail = 0;
> -
> -                complete_async_pkts(vdev);
> -                enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
> -                                        VIRTIO_RXQ, pkts, rx_count);
> -                __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
> -
> -                enqueue_fail = rx_count - enqueue_count;
> -                if (enqueue_fail)
> -                        free_pkts(&pkts[enqueue_count], enqueue_fail);
> -
> -        } else {
> -                enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
> -                                                pkts, rx_count);
> -        }
> +        enqueue_count = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
> +                                        VIRTIO_RXQ, pkts, rx_count);
>
>          if (enable_stats) {
>                  __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
> @@ -1234,6 +1232,14 @@ drain_eth_rx(struct vhost_dev *vdev)
>                  free_pkts(pkts, rx_count);
>  }
>
> +uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +                        struct rte_mempool *mbuf_pool,
> +                        struct rte_mbuf **pkts, uint16_t count)
> +{
> +        return rte_vhost_dequeue_burst(dev->vid, queue_id,
> +                                        mbuf_pool, pkts, count);

Same here.

> +}
> +
>  static __rte_always_inline void
>  drain_virtio_tx(struct vhost_dev *vdev)
>  {
> @@ -1241,13 +1247,8 @@ drain_virtio_tx(struct vhost_dev *vdev)
>          uint16_t count;
>          uint16_t i;
>
> -        if (builtin_net_driver) {
> -                count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
> -                                        pkts, MAX_PKT_BURST);
> -        } else {
> -                count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
> -                                        mbuf_pool, pkts, MAX_PKT_BURST);
> -        }
> +        count = vdev_queue_ops[vdev->vid].dequeue_pkt_burst(vdev,
> +                                VIRTIO_TXQ, mbuf_pool, pkts, MAX_PKT_BURST);
>
>          /* setup VMDq for the first packet */
>          if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
> @@ -1432,6 +1433,21 @@ new_device(int vid)
>                  }
>          }
>
> +        if (builtin_net_driver) {
> +                vdev_queue_ops[vid].enqueue_pkt_burst = builtin_enqueue_pkts;
> +                vdev_queue_ops[vid].dequeue_pkt_burst = builtin_dequeue_pkts;
> +        } else {
> +                if (async_vhost_driver) {
> +                        vdev_queue_ops[vid].enqueue_pkt_burst =
> +                                                async_enqueue_pkts;

Same here.

> +                } else {
> +                        vdev_queue_ops[vid].enqueue_pkt_burst =
> +                                                sync_enqueue_pkts;
> +                }

Same here. And it seems we don't need '{ }' here.
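Just to illustrate the two comments above, the whole init block could then look
something like this (untested sketch, indentation approximate):

        if (builtin_net_driver) {
                vdev_queue_ops[vid].enqueue_pkt_burst = builtin_enqueue_pkts;
                vdev_queue_ops[vid].dequeue_pkt_burst = builtin_dequeue_pkts;
        } else {
                /* single-statement branches, so the braces can go */
                if (async_vhost_driver)
                        vdev_queue_ops[vid].enqueue_pkt_burst = async_enqueue_pkts;
                else
                        vdev_queue_ops[vid].enqueue_pkt_burst = sync_enqueue_pkts;

                vdev_queue_ops[vid].dequeue_pkt_burst = sync_dequeue_pkts;
        }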
Thanks,
Cheng

> +
> +                vdev_queue_ops[vid].dequeue_pkt_burst = sync_dequeue_pkts;
> +        }
> +
>          if (builtin_net_driver)
>                  vs_vhost_net_setup(vdev);
>
> diff --git a/examples/vhost/main.h b/examples/vhost/main.h
> index e7b1ac60a6..2c5a558f12 100644
> --- a/examples/vhost/main.h
> +++ b/examples/vhost/main.h
> @@ -61,6 +61,19 @@ struct vhost_dev {
>          struct vhost_queue queues[MAX_QUEUE_PAIRS * 2];
>  } __rte_cache_aligned;
>
> +typedef uint16_t (*vhost_enqueue_burst_t)(struct vhost_dev *dev,
> +                        uint16_t queue_id, struct rte_mbuf **pkts,
> +                        uint32_t count);
> +
> +typedef uint16_t (*vhost_dequeue_burst_t)(struct vhost_dev *dev,
> +                        uint16_t queue_id, struct rte_mempool *mbuf_pool,
> +                        struct rte_mbuf **pkts, uint16_t count);
> +
> +struct vhost_queue_ops {
> +        vhost_enqueue_burst_t enqueue_pkt_burst;
> +        vhost_dequeue_burst_t dequeue_pkt_burst;
> +};
> +
>  TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);
>
>
> @@ -87,7 +100,16 @@ void vs_vhost_net_remove(struct vhost_dev *dev);
>  uint16_t vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
>                          struct rte_mbuf **pkts, uint32_t count);
>
> -uint16_t vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> -                        struct rte_mempool *mbuf_pool,
> -                        struct rte_mbuf **pkts, uint16_t count);
> +uint16_t builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +                        struct rte_mbuf **pkts, uint32_t count);
> +uint16_t builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +                        struct rte_mempool *mbuf_pool,
> +                        struct rte_mbuf **pkts, uint16_t count);
> +uint16_t sync_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +                        struct rte_mbuf **pkts, uint32_t count);
> +uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +                        struct rte_mempool *mbuf_pool,
> +                        struct rte_mbuf **pkts, uint16_t count);
> +uint16_t async_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +                        struct rte_mbuf **pkts, uint32_t count);
>  #endif /* _MAIN_H_ */
> diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
> index 9064fc3a82..2432a96566 100644
> --- a/examples/vhost/virtio_net.c
> +++ b/examples/vhost/virtio_net.c
> @@ -238,6 +238,13 @@ vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
>          return count;
>  }
>
> +uint16_t
> +builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +                struct rte_mbuf **pkts, uint32_t count)
> +{
> +        return vs_enqueue_pkts(dev, queue_id, pkts, count);
> +}
> +
>  static __rte_always_inline int
>  dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
>              struct rte_mbuf *m, uint16_t desc_idx,
> @@ -363,7 +370,7 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
>          return 0;
>  }
>
> -uint16_t
> +static uint16_t
>  vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
>          struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
>  {
> @@ -440,3 +447,10 @@ vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
>
>          return i;
>  }
> +
> +uint16_t
> +builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
> +{
> +        return vs_dequeue_pkts(dev, queue_id, mbuf_pool, pkts, count);
> +}
> --
> 2.25.1
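P.S. For anyone skimming the thread without the patch applied, below is a tiny
standalone sketch of the ops-table dispatch the commit message describes: the
burst callback is chosen once when the device is created, and the data path
just calls through the table instead of re-checking builtin/async flags per
burst. All names are made up for illustration (no DPDK types); the real code
is the patch above.

#include <stdint.h>
#include <stdio.h>

#define MAX_DEMO_DEVICES 8

struct demo_dev {
        int vid;
};

/* per-device burst callback, analogous to vhost_enqueue_burst_t */
typedef uint16_t (*demo_enqueue_burst_t)(struct demo_dev *dev, uint16_t count);

struct demo_queue_ops {
        demo_enqueue_burst_t enqueue_pkt_burst;
};

static struct demo_queue_ops demo_queue_ops_tbl[MAX_DEMO_DEVICES];

static uint16_t
demo_sync_enqueue(struct demo_dev *dev, uint16_t count)
{
        printf("sync enqueue on vid %d: %u pkts\n", dev->vid, (unsigned int)count);
        return count;
}

static uint16_t
demo_async_enqueue(struct demo_dev *dev, uint16_t count)
{
        printf("async enqueue on vid %d: %u pkts\n", dev->vid, (unsigned int)count);
        return count;
}

/* done once per device, cf. new_device() in the patch */
static void
demo_setup_ops(int vid, int use_async)
{
        demo_queue_ops_tbl[vid].enqueue_pkt_burst =
                use_async ? demo_async_enqueue : demo_sync_enqueue;
}

/* hot path: one indirect call, no flag checks */
static uint16_t
demo_drain(struct demo_dev *dev, uint16_t count)
{
        return demo_queue_ops_tbl[dev->vid].enqueue_pkt_burst(dev, count);
}

int
main(void)
{
        struct demo_dev dev = { .vid = 0 };

        demo_setup_ops(dev.vid, 1);
        demo_drain(&dev, 32);
        return 0;
}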