This patch implements such an egress function in the vhost library. When
data is received from a front-end, it is also sent to the mirror
front-end. A usage sketch of the new API follows the diffstat below.

Signed-off-by: Cheng Jiang <cheng1.ji...@intel.com>
Signed-off-by: Wenwu Ma <wenwux...@intel.com>
---
 lib/vhost/rte_vhost_async.h |  12 +-
 lib/vhost/version.map       |   3 +
 lib/vhost/virtio_net.c      | 729 ++++++++++++++++++++++++++++++++++++
 3 files changed, 742 insertions(+), 2 deletions(-)
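
Usage sketch for reviewers (illustrative only, not part of the diff): a
possible application poll loop calling the new API. The device IDs, queue
indices, mempool and burst size are hypothetical placeholders; queue_id 1
and mirr_queue_id 0 only reflect the odd/even validity checks performed in
rte_vhost_async_try_egress_burst().

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_vhost_async.h>

#define EGRESS_BURST_SZ 32	/* hypothetical burst size */

static void
egress_mirror_poll(int vid, int mirr_vid, struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf *pkts[EGRESS_BURST_SZ];
	int nr_inflight = 0;
	uint16_t i, nr_rx;

	/* Dequeue from the front-end TX queue (index 1) of "vid" while the
	 * async channel also copies the same payload into RX queue 0 of the
	 * mirror device "mirr_vid".
	 */
	nr_rx = rte_vhost_async_try_egress_burst(vid, 1, mirr_vid, 0,
			mbuf_pool, pkts, EGRESS_BURST_SZ, &nr_inflight);

	for (i = 0; i < nr_rx; i++)
		rte_pktmbuf_free(pkts[i]);	/* or forward to a NIC port */
}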

diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index b199af078c..b0772c2db7 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -85,11 +85,12 @@ struct rte_vhost_async_channel_ops {
 };
 
 /**
- * inflight async packet information
+ * in-flight async packet information
  */
 struct async_inflight_info {
        struct rte_mbuf *mbuf;
-       uint16_t descs; /* num of descs inflight */
+       struct virtio_net_hdr nethdr;
+       uint16_t descs; /* num of descs in-flight */
        uint16_t nr_buffers; /* num of buffers inflight for packed ring */
 };
 
@@ -268,4 +269,11 @@ __rte_experimental
 uint16_t rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
                struct rte_mbuf **pkts, uint16_t count);
 
+__rte_experimental
+uint16_t
+rte_vhost_async_try_egress_burst(int vid, uint16_t queue_id,
+       int mirr_vid, uint16_t mirr_queue_id,
+       struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
+       int *nr_inflight);
+
 #endif /* _RTE_VHOST_ASYNC_H_ */
diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index 4c35fa4555..2529943a91 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -87,4 +87,7 @@ EXPERIMENTAL {
        rte_vhost_async_channel_register_thread_unsafe;
        rte_vhost_async_channel_unregister_thread_unsafe;
        rte_vhost_clear_queue_thread_unsafe;
+
+       # added in 21.11
+       rte_vhost_async_try_egress_burst;
 };
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index c9f0bb22e5..92074377b2 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -3827,3 +3827,732 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
        return count;
 }
+
+static __rte_always_inline uint16_t
+async_poll_egress_completed_split(struct virtio_net *dev,
+               struct vhost_virtqueue *vq, uint16_t queue_id,
+               struct virtio_net *mirr_dev, struct vhost_virtqueue *mirr_vq,
+               struct rte_mbuf **pkts, uint16_t count, bool legacy_ol_flags)
+{
+       uint16_t n_pkts_cpl = 0, n_pkts_put = 0;
+       uint16_t start_idx, pkt_idx, from;
+       struct async_inflight_info *pkts_info;
+       uint16_t mirror_n_pkts_cpl = 0, n_descs = 0;
+
+       pkt_idx = vq->async_pkts_idx & (vq->size - 1);
+       pkts_info = vq->async_pkts_info;
+       start_idx = virtio_dev_rx_async_get_info_idx(pkt_idx, vq->size,
+                       vq->async_pkts_inflight_n);
+
+       if (count > vq->async_last_pkts_n) {
+               int ret;
+
+               ret = vq->async_ops.check_completed_copies(dev->vid, queue_id,
+                               0, count - vq->async_last_pkts_n);
+               if (unlikely(ret < 0)) {
+                       VHOST_LOG_DATA(ERR, "(%d) async channel poll error\n", dev->vid);
+                       ret = 0;
+               }
+               n_pkts_cpl = ret;
+       }
+
+       n_pkts_cpl += vq->async_last_pkts_n;
+       mirror_n_pkts_cpl += mirr_vq->async_last_pkts_n;
+       if (unlikely(n_pkts_cpl == 0)) {
+               mirr_vq->async_last_pkts_n = mirror_n_pkts_cpl;
+               return 0;
+       }
+
+       n_pkts_put = RTE_MIN(count, n_pkts_cpl);
+
+       for (pkt_idx = 0; pkt_idx < n_pkts_put; pkt_idx++) {
+               from = (start_idx + pkt_idx) & (vq->size - 1);
+               pkts[pkt_idx] = pkts_info[from].mbuf;
+               n_descs += pkts_info[from].descs;
+
+               if (virtio_net_with_host_offload(dev))
+                       vhost_dequeue_offload(&pkts_info[from].nethdr,
+                                       pkts[pkt_idx], legacy_ol_flags);
+       }
+
+       /* write back completed descs to used ring and update used idx */
+       write_back_completed_descs_split(vq, n_pkts_put);
+       __atomic_add_fetch(&vq->used->idx, n_pkts_put, __ATOMIC_RELEASE);
+       vhost_vring_call_split(dev, vq);
+
+       vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
+       vq->async_pkts_inflight_n -= n_pkts_put;
+
+       if (likely(mirr_vq->enabled && mirr_vq->access_ok)) {
+               write_back_completed_descs_split(mirr_vq, n_descs);
+
+               __atomic_add_fetch(&mirr_vq->used->idx, n_descs,
+                               __ATOMIC_RELEASE);
+               vhost_vring_call_split(mirr_dev, mirr_vq);
+       } else {
+               mirr_vq->last_async_desc_idx_split += n_descs;
+       }
+
+       return n_pkts_put;
+}
+
+static __rte_always_inline void
+egress_async_fill_desc(struct rte_vhost_async_desc *desc,
+       struct rte_vhost_iov_iter *src, struct rte_vhost_iov_iter *dst,
+       struct rte_vhost_iov_iter *mirror_dst)
+{
+       desc->src = src;
+       desc->dst = dst;
+       desc->mirror_dst = mirror_dst;
+}
+
+static __rte_always_inline int
+egress_async_desc_to_mbuf(struct virtio_net *dev,
+                 struct buf_vector *buf_vec, uint16_t nr_vec,
+                 struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
+                 struct iovec *src_iovec, struct iovec *dst_iovec,
+                 struct rte_vhost_iov_iter *src_it,
+                 struct rte_vhost_iov_iter *dst_it,
+                 struct virtio_net *mirr_dev, struct vhost_virtqueue *mirr_vq __rte_unused,
+                 struct buf_vector *mirr_buf_vec, uint16_t mirr_nr_vec, uint16_t mirr_num_buffers,
+                 struct iovec *mirr_dst_iovec, struct rte_vhost_iov_iter *mirr_dst_it,
+                 struct virtio_net_hdr *nethdr,
+                 int nr_iovec)
+{
+       uint64_t buf_addr, buf_iova;
+       uint64_t mapped_len;
+       uint32_t tlen = 0;
+       uint32_t buf_avail, buf_offset, buf_len;
+       uint32_t mbuf_avail, mbuf_offset;
+       uint32_t cpy_len;
+       /* A counter to avoid desc dead loop chain */
+       uint16_t vec_idx = 0;
+       int tvec_idx = 0;
+       struct rte_mbuf *cur = m, *prev = m;
+       struct virtio_net_hdr tmp_hdr;
+       struct virtio_net_hdr *hdr = NULL;
+
+       int error = 0;
+       uint64_t mirror_mapped_len;
+       uint64_t mirror_buf_addr, mirror_buf_iova;
+       uint32_t mirror_buf_avail, mirror_buf_offset, mirror_buf_len;
+       uint32_t mirror_vec_idx = 0;
+
+       mirror_buf_addr = mirr_buf_vec[mirror_vec_idx].buf_addr;
+       mirror_buf_iova = mirr_buf_vec[mirror_vec_idx].buf_iova;
+       mirror_buf_len = mirr_buf_vec[mirror_vec_idx].buf_len;
+
+       if (unlikely(mirror_buf_len < mirr_dev->vhost_hlen && mirr_nr_vec <= 1)) {
+               error = -1;
+               goto out;
+       }
+
+       struct virtio_net_hdr_mrg_rxbuf *mirror_hdr = NULL;
+       mirror_hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)mirror_buf_addr;
+       if (mirror_hdr && rxvq_is_mergeable(mirr_dev))
+               ASSIGN_UNLESS_EQUAL(mirror_hdr->num_buffers, mirr_num_buffers);
+
+       if (unlikely(mirror_buf_len < mirr_dev->vhost_hlen)) {
+               mirror_buf_offset = mirr_dev->vhost_hlen - mirror_buf_len;
+               mirror_vec_idx++;
+               mirror_buf_addr = mirr_buf_vec[mirror_vec_idx].buf_addr;
+               mirror_buf_iova = mirr_buf_vec[mirror_vec_idx].buf_iova;
+               mirror_buf_len = mirr_buf_vec[mirror_vec_idx].buf_len;
+               mirror_buf_avail = mirror_buf_len - mirror_buf_offset;
+       } else {
+               mirror_buf_offset = mirr_dev->vhost_hlen;
+               mirror_buf_avail = mirror_buf_len - mirr_dev->vhost_hlen;
+       }
+
+       buf_addr = buf_vec[vec_idx].buf_addr;
+       buf_len = buf_vec[vec_idx].buf_len;
+       buf_iova = buf_vec[vec_idx].buf_iova;
+
+       if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1))
+               return -1;
+
+       if (virtio_net_with_host_offload(dev)) {
+               if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
+                       /*
+                        * No luck, the virtio-net header doesn't fit
+                        * in a contiguous virtual area.
+                        */
+                       copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
+                       hdr = &tmp_hdr;
+               } else {
+                       hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
+               }
+       }
+
+       /*
+        * A virtio driver normally uses at least 2 desc buffers
+        * for Tx: the first for storing the header, and others
+        * for storing the data.
+        */
+       if (unlikely(buf_len < dev->vhost_hlen)) {
+               buf_offset = dev->vhost_hlen - buf_len;
+               vec_idx++;
+               buf_addr = buf_vec[vec_idx].buf_addr;
+               buf_iova = buf_vec[vec_idx].buf_iova;
+               buf_len = buf_vec[vec_idx].buf_len;
+               buf_avail  = buf_len - buf_offset;
+       } else if (buf_len == dev->vhost_hlen) {
+               if (unlikely(++vec_idx >= nr_vec))
+                       return -1;
+               buf_addr = buf_vec[vec_idx].buf_addr;
+               buf_iova = buf_vec[vec_idx].buf_iova;
+               buf_len = buf_vec[vec_idx].buf_len;
+
+               buf_offset = 0;
+               buf_avail = buf_len;
+       } else {
+               buf_offset = dev->vhost_hlen;
+               buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
+       }
+
+       PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset), (uint32_t)buf_avail, 0);
+
+       mbuf_offset = 0;
+       mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
+       int flag = 0;
+       static uint64_t total = 0;
+       while (1) {
+               cpy_len = RTE_MIN(buf_avail, mbuf_avail);
+               cpy_len = RTE_MIN(cpy_len, mirror_buf_avail);
+
+               while (cpy_len) {
+                       void *hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
+                                               buf_iova + buf_offset, cpy_len,
+                                               &mapped_len);
+                       if (unlikely(!hpa)) {
+                               VHOST_LOG_DATA(ERR, "(%d) %s: failed to get hpa.\n",
+                                       dev->vid, __func__);
+                               return -1;
+                       }
+
+                       void *mirror_hpa = (void *)(uintptr_t)gpa_to_first_hpa(mirr_dev,
+                                       mirror_buf_iova + mirror_buf_offset, cpy_len,
+                                       &mirror_mapped_len);
+                       if (unlikely(!mirror_hpa)) {
+                               VHOST_LOG_DATA(ERR, "(%d) %s: failed to get mirror hpa.\n",
+                               mirr_dev->vid, __func__);
+                               error = -1;
+                               goto out;
+                       }
+
+                       if (unlikely(tvec_idx >= nr_iovec)) {
+                               VHOST_LOG_DATA(ERR, "iovec is not enough for offloading\n");
+                               return -1;
+                       }
+
+                       if (unlikely(mirror_mapped_len != mapped_len)) {
+                               VHOST_LOG_DATA(ERR, "original mapped len is not equal to mirror len\n");
+                               return -1;
+                       }
+
+                       async_fill_vec(src_iovec + tvec_idx, hpa, (size_t)mapped_len);
+                       async_fill_vec(dst_iovec + tvec_idx,
+                               (void *)(uintptr_t)rte_pktmbuf_iova_offset(cur, mbuf_offset),
+                               (size_t)mapped_len);
+                       async_fill_vec(mirr_dst_iovec + tvec_idx,
+                                       mirror_hpa, (size_t)mirror_mapped_len);
+
+                       if ((((uintptr_t)rte_pktmbuf_iova_offset(cur, mbuf_offset) & 0xFFF) ^ ((uint64_t)mirror_hpa & 0xFFF)) != 0 && flag == 0)
+                       {
+                               total++;
+                               VHOST_LOG_DATA(ERR, "%lu.........mbuf=%p mirror_hpa=%p.\n", total, (void *)(uintptr_t)rte_pktmbuf_iova_offset(cur, mbuf_offset), mirror_hpa);
+                       }
+
+                       tvec_idx++;
+                       tlen += (uint32_t)mapped_len;
+                       cpy_len -= (uint32_t)mapped_len;
+                       mbuf_avail -= (uint32_t)mapped_len;
+                       mbuf_offset += (uint32_t)mapped_len;
+                       buf_avail -= (uint32_t)mapped_len;
+                       buf_offset += (uint32_t)mapped_len;
+                       mirror_buf_avail -= (uint32_t)mapped_len;
+                       mirror_buf_offset += (uint32_t)mapped_len;
+               }
+
+               /* This buf reaches to its end, get the next one */
+               if (buf_avail == 0) {
+                       if (++vec_idx >= nr_vec)
+                               break;
+
+                       buf_addr = buf_vec[vec_idx].buf_addr;
+                       buf_iova = buf_vec[vec_idx].buf_iova;
+                       buf_len = buf_vec[vec_idx].buf_len;
+
+                       buf_offset = 0;
+                       buf_avail = buf_len;
+
+                       PRINT_PACKET(dev, (uintptr_t)buf_addr, (uint32_t)buf_avail, 0);
+               }
+
+               if (mirror_buf_avail == 0) {
+                       mirror_vec_idx++;
+                       if (unlikely(mirror_vec_idx >= mirr_nr_vec)) {
+                               error = -1;
+                               goto out;
+                       }
+
+                       mirror_buf_addr = mirr_buf_vec[mirror_vec_idx].buf_addr;
+                       mirror_buf_iova = mirr_buf_vec[mirror_vec_idx].buf_iova;
+                       mirror_buf_len = mirr_buf_vec[mirror_vec_idx].buf_len;
+
+                       mirror_buf_offset = 0;
+                       mirror_buf_avail = mirror_buf_len;
+               }
+
+               /*
+                * This mbuf reaches to its end, get a new one
+                * to hold more data.
+                */
+               if (mbuf_avail == 0) {
+                       cur = rte_pktmbuf_alloc(mbuf_pool);
+                       if (unlikely(cur == NULL)) {
+                               VHOST_LOG_DATA(ERR, "Failed to allocate memory for mbuf.\n");
+                               return -1;
+                       }
+
+                       prev->next = cur;
+                       prev->data_len = mbuf_offset;
+                       m->nb_segs += 1;
+                       m->pkt_len += mbuf_offset;
+                       prev = cur;
+
+                       mbuf_offset = 0;
+                       mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
+               }
+               flag++;
+       }
+
+       prev->data_len = mbuf_offset;
+       m->pkt_len += mbuf_offset;
+
+       if (tlen) {
+               async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
+               async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
+               async_fill_iter(mirr_dst_it, tlen, mirr_dst_iovec, tvec_idx);
+               if (hdr)
+                       *nethdr = *hdr;
+       }
+out:
+       return error;
+}
+
+static __rte_always_inline uint16_t
+virtio_mirror_dev_tx_async_split(struct virtio_net *dev,
+               struct vhost_virtqueue *vq, uint16_t queue_id,
+               struct virtio_net *mirr_dev,
+               struct vhost_virtqueue *mirr_vq, uint16_t mirr_queue_id __rte_unused,
+               struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
+               uint16_t count, bool legacy_ol_flags)
+{
+       static bool allocerr_warned;
+       bool dropped = false;
+       uint16_t free_entries;
+       uint16_t pkt_idx, slot_idx = 0;
+       uint16_t nr_done_pkts = 0;
+       uint16_t nr_async_burst = 0;
+       uint16_t pkt_err = 0;
+       uint16_t iovec_idx = 0, it_idx = 0;
+       struct rte_vhost_iov_iter *it_pool = vq->it_pool;
+       struct iovec *vec_pool = vq->vec_pool;
+       struct iovec *src_iovec = vec_pool;
+       struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+       struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
+       struct async_inflight_info *pkts_info = vq->async_pkts_info;
+       struct rte_mbuf *pkts_prealloc[MAX_PKT_BURST];
+
+
+       struct buf_vector mirror_buf_vec[BUF_VECTOR_MAX];
+       uint16_t mirror_num_buffers;
+       uint16_t mirror_avail_head;
+       mirror_avail_head = __atomic_load_n(&mirr_vq->avail->idx, __ATOMIC_ACQUIRE);
+       rte_prefetch0(&mirr_vq->avail->ring[mirr_vq->last_avail_idx & (mirr_vq->size - 1)]);
+
+       struct rte_vhost_iov_iter *mirror_it_pool = mirr_vq->it_pool;
+       struct iovec *mirror_dst_iovec = mirr_vq->vec_pool;
+       uint16_t mirror_iovec_idx = 0, mirror_it_idx = 0;
+
+       /**
+        * The ordering between avail index and
+        * desc reads needs to be enforced.
+        */
+       free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) - vq->last_avail_idx;
+       if (free_entries == 0)
+               goto out;
+
+       rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+
+       count = RTE_MIN(count, MAX_PKT_BURST);
+       count = RTE_MIN(count, free_entries);
+       VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n", dev->vid, count);
+
+       if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count))
+               goto out;
+
+       for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+               uint16_t head_idx = 0;
+               uint16_t nr_vec = 0;
+               uint16_t to;
+               uint32_t buf_len;
+               int err;
+               struct buf_vector buf_vec[BUF_VECTOR_MAX];
+               struct rte_mbuf *pkt = pkts_prealloc[pkt_idx];
+               uint16_t mirror_nr_vec = 0;
+
+               if (unlikely(fill_vec_buf_split(dev, vq, vq->last_avail_idx,
+                                               &nr_vec, buf_vec,
+                                               &head_idx, &buf_len,
+                                               VHOST_ACCESS_RO) < 0)) {
+                       dropped = true;
+                       break;
+               }
+
+               if (unlikely(reserve_avail_buf_split(mirr_dev, mirr_vq,
+                                               buf_len, mirror_buf_vec, &mirror_num_buffers,
+                                               mirror_avail_head, &mirror_nr_vec) < 0)) {
+                       VHOST_LOG_DATA(DEBUG,
+                               "(%d) failed to get enough desc from mirror vring\n",
+                               mirr_dev->vid);
+                       mirr_vq->shadow_used_idx -= mirror_num_buffers;
+                       dropped = true;
+                       break;
+               }
+
+               err = virtio_dev_pktmbuf_prep(dev, pkt, buf_len);
+               if (unlikely(err)) {
+                       /**
+                        * mbuf allocation fails for jumbo packets when external
+                        * buffer allocation is not allowed and linear buffer
+                        * is required. Drop this packet.
+                        */
+                       if (!allocerr_warned) {
+                               VHOST_LOG_DATA(ERR,
+                                       "Failed mbuf alloc of size %d from %s on %s.\n",
+                                       buf_len, mbuf_pool->name, dev->ifname);
+                               allocerr_warned = true;
+                       }
+                       dropped = true;
+                       break;
+               }
+
+               slot_idx = (vq->async_pkts_idx + pkt_idx) & (vq->size - 1);
+               err = egress_async_desc_to_mbuf(dev, buf_vec, nr_vec, pkt,
+                               mbuf_pool, &src_iovec[iovec_idx],
+                               &dst_iovec[iovec_idx], &it_pool[it_idx],
+                               &it_pool[it_idx + 1],
+                               mirr_dev, mirr_vq,
+                               mirror_buf_vec, mirror_nr_vec, mirror_num_buffers,
+                               &mirror_dst_iovec[mirror_iovec_idx],
+                               &mirror_it_pool[mirror_it_idx],
+                               &pkts_info[slot_idx].nethdr,
+                               (VHOST_MAX_ASYNC_VEC >> 1) - iovec_idx);
+               if (unlikely(err)) {
+                       if (!allocerr_warned) {
+                               VHOST_LOG_DATA(ERR,
+                                       "Failed to offload copies to async channel %s.\n",
+                                       dev->ifname);
+                               allocerr_warned = true;
+                       }
+                       dropped = true;
+                       break;
+               }
+
+               egress_async_fill_desc(&tdes[nr_async_burst],
+                       &it_pool[it_idx], &it_pool[it_idx + 1], &mirror_it_pool[mirror_it_idx]);
+               pkts_info[slot_idx].mbuf = pkt;
+               nr_async_burst++;
+
+               iovec_idx += it_pool[it_idx].nr_segs;
+               it_idx += 2;
+
+               mirror_iovec_idx += mirror_it_pool[mirror_it_idx].nr_segs;
+               mirror_it_idx += 1;
+               mirr_vq->last_avail_idx += mirror_num_buffers;
+               pkts_info[slot_idx].descs = mirror_num_buffers;
+
+               /* store used descs */
+               to = vq->async_desc_idx_split & (vq->size - 1);
+               vq->async_descs_split[to].id = head_idx;
+               vq->async_descs_split[to].len = 0;
+               vq->async_desc_idx_split++;
+
+               vq->last_avail_idx++;
+
+               if (unlikely(nr_async_burst >= VHOST_ASYNC_BATCH_THRESHOLD)) {
+                       uint16_t nr_pkts;
+                       int32_t ret;
+
+                       ret = vq->async_ops.transfer_data(dev->vid, queue_id,
+                                       tdes, 0, nr_async_burst, true);
+                       if (unlikely(ret < 0)) {
+                               VHOST_LOG_DATA(ERR, "(%d) async channel submit error\n", dev->vid);
+                               ret = 0;
+                       }
+                       nr_pkts = ret;
+
+                       vq->async_pkts_inflight_n += nr_pkts;
+                       it_idx = 0;
+                       iovec_idx = 0;
+
+                       mirror_iovec_idx = 0;
+                       mirror_it_idx = 0;
+
+                       if (unlikely(nr_pkts < nr_async_burst)) {
+                               pkt_err = nr_async_burst - nr_pkts;
+                               nr_async_burst = 0;
+                               pkt_idx++;
+                               break;
+                       }
+                       nr_async_burst = 0;
+               }
+       }
+
+       if (unlikely(dropped))
+               rte_pktmbuf_free_bulk(&pkts_prealloc[pkt_idx], count - pkt_idx);
+
+       if (nr_async_burst) {
+               uint16_t nr_pkts;
+               int32_t ret;
+
+               ret = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, nr_async_burst, true);
+               if (unlikely(ret < 0)) {
+                       VHOST_LOG_DATA(ERR, "(%d) async channel submit error\n", dev->vid);
+                       ret = 0;
+               }
+               nr_pkts = ret;
+
+               vq->async_pkts_inflight_n += nr_pkts;
+
+               if (unlikely(nr_pkts < nr_async_burst))
+                       pkt_err = nr_async_burst - nr_pkts;
+       }
+
+       if (unlikely(pkt_err)) {
+               uint16_t nr_err_dma = pkt_err;
+               uint16_t num_descs = 0;
+
+               pkt_idx -= nr_err_dma;
+
+               /**
+                * recover async channel copy related structures and free pktmbufs
+                * for error pkts.
+                */
+               vq->async_desc_idx_split -= nr_err_dma;
+               while (nr_err_dma-- > 0) {
+                       rte_pktmbuf_free(pkts_info[slot_idx & (vq->size - 1)].mbuf);
+                       num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
+                       slot_idx--;
+               }
+
+               /* recover available ring */
+               vq->last_avail_idx -= pkt_err;
+
+               mirr_vq->shadow_used_idx -= num_descs;
+               mirr_vq->last_avail_idx -= num_descs;
+       }
+
+       vq->async_pkts_idx += pkt_idx;
+
+       if (likely(mirr_vq->shadow_used_idx)) {
+               uint16_t to = mirr_vq->async_desc_idx_split & (mirr_vq->size - 1);
+
+               store_dma_desc_info_split(mirr_vq->shadow_used_split,
+                               mirr_vq->async_descs_split, mirr_vq->size, 0, to, mirr_vq->shadow_used_idx);
+
+               mirr_vq->async_desc_idx_split += mirr_vq->shadow_used_idx;
+               mirr_vq->async_pkts_idx += pkt_idx;
+               mirr_vq->async_pkts_inflight_n += pkt_idx;
+               mirr_vq->shadow_used_idx = 0;
+       }
+
+out:
+       if (vq->async_pkts_inflight_n > 0) {
+               nr_done_pkts = async_poll_egress_completed_split(dev, vq,
+                                       queue_id,
+                                       mirr_dev, mirr_vq,
+                                       pkts, count, legacy_ol_flags);
+       }
+
+       return nr_done_pkts;
+}
+
+__rte_noinline
+static uint16_t
+virtio_mirror_dev_tx_async_split_legacy(struct virtio_net *dev,
+               struct vhost_virtqueue *vq, uint16_t queue_id,
+               struct virtio_net *mirr_dev,
+               struct vhost_virtqueue *mirr_vq, uint16_t mirr_queue_id,
+               struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
+               uint16_t count)
+{
+       return virtio_mirror_dev_tx_async_split(dev, vq, queue_id,
+                               mirr_dev, mirr_vq, mirr_queue_id,
+                               mbuf_pool,
+                               pkts, count, true);
+}
+
+__rte_noinline
+static uint16_t
+virtio_mirror_dev_tx_async_split_compliant(struct virtio_net *dev,
+               struct vhost_virtqueue *vq, uint16_t queue_id,
+               struct virtio_net *mirr_dev,
+               struct vhost_virtqueue *mirr_vq, uint16_t mirr_queue_id,
+               struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
+               uint16_t count)
+{
+       return virtio_mirror_dev_tx_async_split(dev, vq, queue_id,
+                               mirr_dev, mirr_vq, mirr_queue_id,
+                               mbuf_pool,
+                               pkts, count, false);
+}
+
+uint16_t
+rte_vhost_async_try_egress_burst(int vid, uint16_t queue_id,
+       int mirr_vid, uint16_t mirr_queue_id,
+       struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
+       int *nr_inflight)
+{
+       struct virtio_net *dev;
+       struct virtio_net *mirror_dev;
+       struct rte_mbuf *rarp_mbuf = NULL;
+       struct vhost_virtqueue *vq;
+       struct vhost_virtqueue *mirror_vq;
+       int16_t success = 1;
+
+       *nr_inflight = -1;
+
+       dev = get_device(vid);
+       mirror_dev = get_device(mirr_vid);
+       if (!dev || !mirror_dev)
+               return 0;
+
+       if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))
+               || unlikely(!(mirror_dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+               VHOST_LOG_DATA(ERR,
+                       "(%d) %s: built-in vhost net backend is disabled.\n",
+                       dev->vid, __func__);
+               return 0;
+       }
+
+       if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))
+               || unlikely(!is_valid_virt_queue_idx(mirr_queue_id, 0, mirror_dev->nr_vring))) {
+               VHOST_LOG_DATA(ERR,
+                       "(%d) %s: invalid virtqueue idx %d.\n",
+                       dev->vid, __func__, queue_id);
+               return 0;
+       }
+
+       vq = dev->virtqueue[queue_id];
+       mirror_vq = mirror_dev->virtqueue[mirr_queue_id];
+
+       if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0)
+               || unlikely(rte_spinlock_trylock(&mirror_vq->access_lock) == 0))
+               return 0;
+
+       if (unlikely(vq->enabled == 0) || unlikely(mirror_vq->enabled == 0)) {
+               count = 0;
+               goto out_access_unlock;
+       }
+
+       if (unlikely(!vq->async_registered) || unlikely(!mirror_vq->async_registered)) {
+               VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+                       dev->vid, __func__, queue_id);
+               count = 0;
+               goto out_access_unlock;
+       }
+
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               vhost_user_iotlb_rd_lock(vq);
+
+       if (unlikely(vq->access_ok == 0) || unlikely(mirror_vq->access_ok == 0))
+               if (unlikely(vring_translate(dev, vq) < 0)
+                       || unlikely(vring_translate(mirror_dev, mirror_vq) < 0)) {
+                       count = 0;
+                       goto out_access_unlock;
+               }
+
+       /*
+        * Construct a RARP broadcast packet, and inject it to the "pkts"
+        * array, so that it looks like the guest actually sent such a packet.
+        *
+        * Check user_send_rarp() for more information.
+        *
+        * broadcast_rarp shares a cacheline in the virtio_net structure
+        * with some fields that are accessed during enqueue and
+        * __atomic_compare_exchange_n causes a write if performed compare
+        * and exchange. This could result in false sharing between enqueue
+        * and dequeue.
+        *
+        * Prevent unnecessary false sharing by reading broadcast_rarp first
+        * and only performing compare and exchange if the read indicates it
+        * is likely to be set.
+        */
+       if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
+                       __atomic_compare_exchange_n(&dev->broadcast_rarp,
+                       &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
+
+               rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
+               if (rarp_mbuf == NULL) {
+                       VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
+                       count = 0;
+                       goto out;
+               }
+               count -= 1;
+       }
+
+#if 0
+       if (unlikely(__atomic_load_n(&mirror_dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
+                       __atomic_compare_exchange_n(&mirror_dev->broadcast_rarp,
+                       &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
+
+               rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &mirror_dev->mac);
+               if (rarp_mbuf == NULL) {
+                       VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
+                       count = 0;
+                       goto out;
+               }
+               count -= 1;
+       }
+#endif
+       if (unlikely(vq_is_packed(dev)) || unlikely(vq_is_packed(mirror_dev))) {
+               VHOST_LOG_DATA(ERR,
+                       "(%d) %s: async dequeue does not support packed ring.\n",
+                       dev->vid, __func__);
+               return 0;
+       }
+
+       if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
+               count = virtio_mirror_dev_tx_async_split_legacy(dev, vq, queue_id,
+                               mirror_dev, mirror_vq, mirr_queue_id,
+                               mbuf_pool, pkts, count);
+       else
+               count = virtio_mirror_dev_tx_async_split_compliant(dev, vq, queue_id,
+                               mirror_dev, mirror_vq, mirr_queue_id,
+                               mbuf_pool, pkts, count);
+
+out:
+       *nr_inflight = vq->async_pkts_inflight_n;
+
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               vhost_user_iotlb_rd_unlock(vq);
+
+out_access_unlock:
+       rte_spinlock_unlock(&vq->access_lock);
+       rte_spinlock_unlock(&mirror_vq->access_lock);
+
+       if (unlikely(rarp_mbuf != NULL)) {
+               /*
+                * Inject it to the head of "pkts" array, so that switch's mac
+                * learning table will get updated first.
+                */
+               memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
+               pkts[0] = rarp_mbuf;
+               count += 1;
+       }
+
+       return count;
+}
-- 
2.35.1
