> -----Original Message-----
> From: Marvin Liu <yong....@intel.com>
> Sent: Thursday, September 26, 2019 1:13 AM
> To: maxime.coque...@redhat.com; tiwei....@intel.com;
> zhihong.w...@intel.com; step...@networkplumber.org; Gavin Hu (Arm
> Technology China) <gavin...@arm.com>
> Cc: dev@dpdk.org; Marvin Liu <yong....@intel.com>
> Subject: [PATCH v3 07/15] vhost: add flush function for batch enqueue
> 
> Flush used flags when the batched enqueue function is finished.
> Descriptors' flags are pre-calculated, as they will be reset by vhost.
> 
> Signed-off-by: Marvin Liu <yong....@intel.com>
> 
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 18a207fc6..7bf9ff9b7 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -39,6 +39,9 @@
> 
>  #define VHOST_LOG_CACHE_NR 32
> 
> +#define PACKED_RX_USED_FLAG  (0ULL | VRING_DESC_F_AVAIL | VRING_DESC_F_USED \
> +                             | VRING_DESC_F_WRITE)
> +#define PACKED_RX_USED_WRAP_FLAG (VRING_DESC_F_WRITE)
>  #define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
>                           sizeof(struct vring_packed_desc))
>  #define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)
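
[A side note for readers: on platforms with a 64-byte cache line,
PACKED_BATCH_SIZE evaluates to 4, so one batch covers exactly one cache
line of the descriptor ring. A minimal sketch of that arithmetic,
assuming the standard virtio 1.1 packed descriptor layout (reproduced
here only for illustration):

    /* Standard 16-byte packed descriptor (virtio 1.1). */
    struct vring_packed_desc {
            uint64_t addr;   /* buffer guest-physical address */
            uint32_t len;    /* buffer length */
            uint16_t id;     /* buffer id */
            uint16_t flags;  /* VRING_DESC_F_AVAIL/USED/WRITE, ... */
    };

    /* 64-byte line / 16-byte desc -> PACKED_BATCH_SIZE == 4,
     * PACKED_BATCH_MASK == 0x3. On a 128-byte cache line the
     * batch would be 8 descriptors instead. */
]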
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index f85619dc2..a629e66d4 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -169,6 +169,49 @@ update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
>       vq->shadow_used_packed[i].count = count;
>  }
> 
> +static __rte_always_inline void
> +flush_used_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
> +     uint64_t *lens, uint16_t *ids, uint16_t flags)
> +{
> +     uint16_t i;
> +
> +     UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM)
> +     for (i = 0; i < PACKED_BATCH_SIZE; i++) {
> +             vq->desc_packed[vq->last_used_idx + i].id = ids[i];
> +             vq->desc_packed[vq->last_used_idx + i].len = lens[i];
> +     }
> +
> +     rte_smp_wmb();
> +     UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM)
> +     for (i = 0; i < PACKED_BATCH_SIZE; i++)
> +             vq->desc_packed[vq->last_used_idx + i].flags = flags;
> +
> +     vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
> +                                sizeof(struct vring_packed_desc),
> +                                sizeof(struct vring_packed_desc) *
> +                                PACKED_BATCH_SIZE);
> +     vhost_log_cache_sync(dev, vq);
> +
> +     vq->last_used_idx += PACKED_BATCH_SIZE;
> +     if (vq->last_used_idx >= vq->size) {
> +             vq->used_wrap_counter ^= 1;
> +             vq->last_used_idx -= vq->size;
> +     }
> +}
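
[The rte_smp_wmb() between the id/len stores and the flags stores is
the key ordering point here: the driver treats a descriptor as used as
soon as it observes matching AVAIL/USED bits, so id and len must be
globally visible first. A rough sketch of the driver-side check this
write barrier pairs with, following the virtio 1.1 packed-ring rules
(the helper name is illustrative, not from this patch; the flag macros
are the usual virtio ones):

    /* Driver side: a descriptor is used once AVAIL == USED == the
     * driver's used wrap counter. flags must be read before the other
     * fields, i.e. with a read barrier or acquire load on that side. */
    static inline bool
    desc_is_used(const struct vring_packed_desc *desc, bool wrap_counter)
    {
            uint16_t flags = desc->flags;
            bool avail = !!(flags & VRING_DESC_F_AVAIL);
            bool used = !!(flags & VRING_DESC_F_USED);

            return avail == used && used == wrap_counter;
    }
]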
> +
> +static __rte_always_inline void
> +flush_enqueue_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
> +     uint64_t *lens, uint16_t *ids)
> +{
> +     uint16_t flags = 0;
> +
> +     if (vq->used_wrap_counter)
> +             flags = PACKED_RX_USED_FLAG;
> +     else
> +             flags = PACKED_RX_USED_WRAP_FLAG;
> +     flush_used_batch_packed(dev, vq, lens, ids, flags);
> +}
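
[For readers following along: the two constants added in vhost.h encode
the packed-ring rule that a used descriptor has AVAIL == USED ==
used_wrap_counter, with VRING_DESC_F_WRITE always set on the RX path
since the buffers are device-writable:

    used_wrap_counter == 1  ->  AVAIL | USED | WRITE  (PACKED_RX_USED_FLAG)
    used_wrap_counter == 0  ->  WRITE only            (PACKED_RX_USED_WRAP_FLAG)
]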
> +
>  static __rte_always_inline void
>  update_enqueue_shadow_used_ring_packed(struct vhost_virtqueue *vq,
>       uint16_t desc_idx, uint32_t len, uint16_t count)
> @@ -937,6 +980,7 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>       struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
>       uint32_t buf_offset = dev->vhost_hlen;
>       uint64_t lens[PACKED_BATCH_SIZE];
> +     uint16_t ids[PACKED_BATCH_SIZE];
>       uint16_t i;
> 
>       if (unlikely(avail_idx & PACKED_BATCH_MASK))
> @@ -1003,6 +1047,12 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>                          pkts[i]->pkt_len);
>       }
> 
> +     UNROLL_PRAGMA(UNROLL_PRAGMA_PARAM)
> +     for (i = 0; i < PACKED_BATCH_SIZE; i++)
> +             ids[i] = descs[avail_idx + i].id;
> +
> +     flush_enqueue_batch_packed(dev, vq, lens, ids);
> +
>       return 0;
>  }

Reviewed-by: Gavin Hu <gavin...@arm.com>

> --
> 2.17.1
