On 7/10/20 4:38 AM, Joyce Kong wrote:
> Restrict pointer aliasing to allow the compiler to vectorize loops
> more aggressively.
>
> With this patch, a 9.6% throughput improvement is observed for the
> packed virtio-net PVP case, and a 2.8% throughput improvement for the
> packed virtio-user PVP case. All performance data are measured on the
> ThunderX-2 platform under 0.001% acceptable packet loss, with 1 core
> on both the vhost and virtio sides.
>
> Signed-off-by: Joyce Kong <joyce.k...@arm.com>
> Reviewed-by: Phil Yang <phil.y...@arm.com>
> ---
> drivers/net/virtio/virtio_rxtx_simple_neon.c | 5 +++--
> lib/librte_vhost/virtio_net.c | 14 +++++++-------
> 2 files changed, 10 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
> index a9b649814..02520fda8 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
> @@ -36,8 +36,9 @@
> * - nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, just return no packet
> */
> uint16_t
> -virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf
> - **__rte_restrict rx_pkts, uint16_t nb_pkts)
> +virtio_recv_pkts_vec(void *rx_queue,
> + struct rte_mbuf **__rte_restrict rx_pkts,
> + uint16_t nb_pkts)
> {
> struct virtnet_rx *rxvq = rx_queue;
> struct virtqueue *vq = rxvq->vq;
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 236498f71..1d0be3dd4 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -1353,8 +1353,8 @@ virtio_dev_rx_single_packed(struct virtio_net *dev,
>
> static __rte_noinline uint32_t
> virtio_dev_rx_packed(struct virtio_net *dev,
> - struct vhost_virtqueue *vq,
> - struct rte_mbuf **pkts,
> + struct vhost_virtqueue *__rte_restrict vq,
> + struct rte_mbuf **__rte_restrict pkts,
> uint32_t count)
> {
> uint32_t pkt_idx = 0;
> @@ -1439,7 +1439,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
>
> uint16_t
> rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
> - struct rte_mbuf **pkts, uint16_t count)
> + struct rte_mbuf **__rte_restrict pkts, uint16_t count)
> {
> struct virtio_net *dev = get_device(vid);
>
> @@ -2671,9 +2671,9 @@ free_zmbuf(struct vhost_virtqueue *vq)
>
> static __rte_noinline uint16_t
> virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
> - struct vhost_virtqueue *vq,
> + struct vhost_virtqueue *__rte_restrict vq,
> struct rte_mempool *mbuf_pool,
> - struct rte_mbuf **pkts,
> + struct rte_mbuf **__rte_restrict pkts,
> uint32_t count)
> {
> uint32_t pkt_idx = 0;
> @@ -2707,9 +2707,9 @@ virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
>
> static __rte_noinline uint16_t
> virtio_dev_tx_packed(struct virtio_net *dev,
> - struct vhost_virtqueue *vq,
> + struct vhost_virtqueue *__rte_restrict vq,
> struct rte_mempool *mbuf_pool,
> - struct rte_mbuf **pkts,
> + struct rte_mbuf **__rte_restrict pkts,
> uint32_t count)
> {
> uint32_t pkt_idx = 0;
>
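For anyone skimming the thread, here is a minimal sketch of why the
restrict qualifier can help (this is not code from the patch; the
function and variable names are made up, and as far as I can tell
__rte_restrict simply maps to the compiler's __restrict keyword on
GCC/Clang builds):

#include <stddef.h>
#include <stdint.h>

/*
 * Toy example, not from the patch: with restrict-qualified pointers
 * the compiler may assume dst and src never alias, so it does not have
 * to reload src[i] after every store to dst[i] and can emit vector
 * loads/stores for the whole loop.
 */
static void
copy_u32(uint32_t *__restrict dst, const uint32_t *__restrict src, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		dst[i] = src[i];
}

That is essentially the assumption the patch lets the compiler make
about the pkts arrays in these paths.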
The vhost part looks good to me.
Acked-by: Adrián Moreno <amore...@redhat.com>
--
Adrián Moreno