> -----Original Message-----
> From: Maxime Coquelin <maxime.coque...@redhat.com>
> Sent: Tuesday, March 23, 2021 5:02 PM
> To: dev@dpdk.org; Xia, Chenbo <chenbo....@intel.com>; amore...@redhat.com;
> david.march...@redhat.com; olivier.m...@6wind.com; bnem...@redhat.com
> Cc: Maxime Coquelin <maxime.coque...@redhat.com>
> Subject: [PATCH v4 3/3] vhost: optimize vhost virtqueue struct
> 
> This patch reorders the vhost_virtqueue struct fields in order
> to both optimize packing and place the hot fields on the first
> cache lines.
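
For readers less familiar with the technique: the goal is to keep the fields
touched on every enqueue/dequeue within the first cache line(s) of the struct
and to push control-path-only fields further down. Below is a minimal,
hypothetical sketch (struct and field names are illustrative, not the actual
vhost_virtqueue, and a 64-byte cache line is assumed) of how such a layout
invariant can be checked at compile time with offsetof():

    /* Hypothetical sketch only -- not lib/librte_vhost code. */
    #include <assert.h>   /* static_assert (C11) */
    #include <stdbool.h>
    #include <stddef.h>   /* offsetof */
    #include <stdint.h>

    #define CACHE_LINE_SIZE 64  /* assumed cache line size */

    struct vq_sketch {
            /* hot: read/written on every enqueue/dequeue */
            void            *desc;
            void            *avail;
            void            *used;
            uint16_t        size;
            uint16_t        last_avail_idx;
            uint16_t        last_used_idx;
            bool            enabled;
            bool            access_ok;
            /* cold: control path only */
            int             callfd;
            int             kickfd;
            uint64_t        log_guest_addr;
    } __attribute__((aligned(CACHE_LINE_SIZE)));

    /* Fails the build if a hot field ever drifts past the first cache line. */
    static_assert(offsetof(struct vq_sketch, access_ok) < CACHE_LINE_SIZE,
                  "hot fields must stay in the first cache line");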
> 
> Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
> ---
>  lib/librte_vhost/vhost.c      |  6 ++--
>  lib/librte_vhost/vhost.h      | 54 ++++++++++++++++++-----------------
>  lib/librte_vhost/vhost_user.c | 23 +++++++--------
>  lib/librte_vhost/virtio_net.c | 12 ++++----
>  4 files changed, 48 insertions(+), 47 deletions(-)
> 
> diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
> index a8032e3ba1..04d63b2f02 100644
> --- a/lib/librte_vhost/vhost.c
> +++ b/lib/librte_vhost/vhost.c
> @@ -524,7 +524,7 @@ vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
>       if (log_translate(dev, vq) < 0)
>               return -1;
> 
> -     vq->access_ok = 1;
> +     vq->access_ok = true;
> 
>       return 0;
>  }
> @@ -535,7 +535,7 @@ vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
>       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
>               vhost_user_iotlb_wr_lock(vq);
> 
> -     vq->access_ok = 0;
> +     vq->access_ok = false;
>       vq->desc = NULL;
>       vq->avail = NULL;
>       vq->used = NULL;
> @@ -1451,7 +1451,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
> 
>       rte_spinlock_lock(&vq->access_lock);
> 
> -     if (unlikely(vq->enabled == 0 || vq->avail == NULL))
> +     if (unlikely(!vq->enabled || vq->avail == NULL))
>               goto out;
> 
>       ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 3a71dfeed9..f628714c24 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -133,7 +133,7 @@ struct vhost_virtqueue {
>               struct vring_used       *used;
>               struct vring_packed_desc_event *device_event;
>       };
> -     uint32_t                size;
> +     uint16_t                size;
> 
>       uint16_t                last_avail_idx;
>       uint16_t                last_used_idx;
> @@ -143,29 +143,12 @@ struct vhost_virtqueue {
>  #define VIRTIO_INVALID_EVENTFD               (-1)
>  #define VIRTIO_UNINITIALIZED_EVENTFD (-2)
> 
> -     int                     enabled;
> -     int                     access_ok;
> -     int                     ready;
> -     int                     notif_enable;
> -#define VIRTIO_UNINITIALIZED_NOTIF   (-1)
> +     bool                    enabled;
> +     bool                    access_ok;
> +     bool                    ready;
> 
>       rte_spinlock_t          access_lock;
> 
> -     /* Used to notify the guest (trigger interrupt) */
> -     int                     callfd;
> -     /* Currently unused as polling mode is enabled */
> -     int                     kickfd;
> -
> -     /* Physical address of used ring, for logging */
> -     uint64_t                log_guest_addr;
> -
> -     /* inflight share memory info */
> -     union {
> -             struct rte_vhost_inflight_info_split *inflight_split;
> -             struct rte_vhost_inflight_info_packed *inflight_packed;
> -     };
> -     struct rte_vhost_resubmit_info *resubmit_inflight;
> -     uint64_t                global_counter;
> 
>       union {
>               struct vring_used_elem  *shadow_used_split;
> @@ -176,22 +159,36 @@ struct vhost_virtqueue {
>       uint16_t                shadow_aligned_idx;
>       /* Record packed ring first dequeue desc index */
>       uint16_t                shadow_last_used_idx;
> -     struct vhost_vring_addr ring_addrs;
> 
> -     struct batch_copy_elem  *batch_copy_elems;
>       uint16_t                batch_copy_nb_elems;
> +     struct batch_copy_elem  *batch_copy_elems;
>       bool                    used_wrap_counter;
>       bool                    avail_wrap_counter;
> 
> -     struct log_cache_entry *log_cache;
> -     uint16_t log_cache_nb_elem;
> +     uint16_t                log_cache_nb_elem;
> +     /* Physical address of used ring, for logging */
> +     uint64_t                log_guest_addr;
> +     struct log_cache_entry  *log_cache;
> 
>       rte_rwlock_t    iotlb_lock;
>       rte_rwlock_t    iotlb_pending_lock;
>       struct rte_mempool *iotlb_pool;
>       TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
> -     int                             iotlb_cache_nr;
>       TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
> +     int                             iotlb_cache_nr;
> +
> +     /* Used to notify the guest (trigger interrupt) */
> +     int                     callfd;
> +     /* Currently unused as polling mode is enabled */
> +     int                     kickfd;
> +
> +     /* inflight share memory info */
> +     union {
> +             struct rte_vhost_inflight_info_split *inflight_split;
> +             struct rte_vhost_inflight_info_packed *inflight_packed;
> +     };
> +     struct rte_vhost_resubmit_info *resubmit_inflight;
> +     uint64_t                global_counter;
> 
>       /* operation callbacks for async dma */
>       struct rte_vhost_async_channel_ops      async_ops;
> @@ -212,6 +209,11 @@ struct vhost_virtqueue {
>       bool            async_inorder;
>       bool            async_registered;
>       uint16_t        async_threshold;
> +
> +     int                     notif_enable;
> +#define VIRTIO_UNINITIALIZED_NOTIF   (-1)
> +
> +     struct vhost_vring_addr ring_addrs;
>  } __rte_cache_aligned;
> 
>  /* Virtio device status as per Virtio specification */
> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> index 4d9e76e49e..2f4f89aeac 100644
> --- a/lib/librte_vhost/vhost_user.c
> +++ b/lib/librte_vhost/vhost_user.c
> @@ -406,6 +406,11 @@ vhost_user_set_vring_num(struct virtio_net **pdev,
>       if (validate_msg_fds(msg, 0) != 0)
>               return RTE_VHOST_MSG_RESULT_ERR;
> 
> +     if (msg->payload.state.num > 32768) {
> +             VHOST_LOG_CONFIG(ERR, "invalid virtqueue size %u\n", msg->payload.state.num);
> +             return RTE_VHOST_MSG_RESULT_ERR;
> +     }
> +
>       vq->size = msg->payload.state.num;
> 
>       /* VIRTIO 1.0, 2.4 Virtqueues says:
> @@ -425,12 +430,6 @@ vhost_user_set_vring_num(struct virtio_net **pdev,
>               }
>       }
> 
> -     if (vq->size > 32768) {
> -             VHOST_LOG_CONFIG(ERR,
> -                     "invalid virtqueue size %u\n", vq->size);
> -             return RTE_VHOST_MSG_RESULT_ERR;
> -     }
> -
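
Moving the check also looks necessary for correctness, not just cosmetic: with
vq->size narrowed to uint16_t, validating msg->payload.state.num before the
assignment means the check can never be fooled by a silently truncated value.
A small, standalone illustration of the hazard (hypothetical example, not patch
code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t num = 65536;           /* out-of-range value from the frontend */
            uint16_t size = (uint16_t)num;  /* silently truncates to 0 */

            /* checking the narrowed copy misses the bad value... */
            printf("check after assignment:  %s\n", size > 32768 ? "caught" : "missed");
            /* ...checking the original value before assignment catches it */
            printf("check before assignment: %s\n", num > 32768 ? "caught" : "missed");
            return 0;
    }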
>       if (vq_is_packed(dev)) {
>               if (vq->shadow_used_packed)
>                       rte_free(vq->shadow_used_packed);
> @@ -713,7 +712,7 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
>                       return dev;
>               }
> 
> -             vq->access_ok = 1;
> +             vq->access_ok = true;
>               return dev;
>       }
> 
> @@ -771,7 +770,7 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
>               vq->last_avail_idx = vq->used->idx;
>       }
> 
> -     vq->access_ok = 1;
> +     vq->access_ok = true;
> 
>       VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address desc: %p\n",
>                       dev->vid, vq->desc);
> @@ -1658,7 +1657,7 @@ vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
>       vq = dev->virtqueue[file.index];
> 
>       if (vq->ready) {
> -             vq->ready = 0;
> +             vq->ready = false;
>               vhost_user_notify_queue_state(dev, file.index, 0);
>       }
> 
> @@ -1918,14 +1917,14 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
>        * the SET_VRING_ENABLE message.
>        */
>       if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
> -             vq->enabled = 1;
> +             vq->enabled = true;
>               if (dev->notify_ops->vring_state_changed)
>                       dev->notify_ops->vring_state_changed(
>                               dev->vid, file.index, 1);
>       }
> 
>       if (vq->ready) {
> -             vq->ready = 0;
> +             vq->ready = false;
>               vhost_user_notify_queue_state(dev, file.index, 0);
>       }
> 
> @@ -2043,7 +2042,7 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
>                       int main_fd __rte_unused)
>  {
>       struct virtio_net *dev = *pdev;
> -     int enable = (int)msg->payload.state.num;
> +     bool enable = !!msg->payload.state.num;
>       int index = (int)msg->payload.state.index;
> 
>       if (validate_msg_fds(msg, 0) != 0)
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 583bf379c6..3d8e29df09 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -1396,13 +1396,13 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
> 
>       rte_spinlock_lock(&vq->access_lock);
> 
> -     if (unlikely(vq->enabled == 0))
> +     if (unlikely(!vq->enabled))
>               goto out_access_unlock;
> 
>       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
>               vhost_user_iotlb_rd_lock(vq);
> 
> -     if (unlikely(vq->access_ok == 0))
> +     if (unlikely(!vq->access_ok))
>               if (unlikely(vring_translate(dev, vq) < 0))
>                       goto out;
> 
> @@ -1753,13 +1753,13 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
> 
>       rte_spinlock_lock(&vq->access_lock);
> 
> -     if (unlikely(vq->enabled == 0 || !vq->async_registered))
> +     if (unlikely(!vq->enabled || !vq->async_registered))
>               goto out_access_unlock;
> 
>       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
>               vhost_user_iotlb_rd_lock(vq);
> 
> -     if (unlikely(vq->access_ok == 0))
> +     if (unlikely(!vq->access_ok))
>               if (unlikely(vring_translate(dev, vq) < 0))
>                       goto out;
> 
> @@ -2518,7 +2518,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
>       if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
>               return 0;
> 
> -     if (unlikely(vq->enabled == 0)) {
> +     if (unlikely(!vq->enabled)) {
>               count = 0;
>               goto out_access_unlock;
>       }
> @@ -2526,7 +2526,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
>       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
>               vhost_user_iotlb_rd_lock(vq);
> 
> -     if (unlikely(vq->access_ok == 0))
> +     if (unlikely(!vq->access_ok))
>               if (unlikely(vring_translate(dev, vq) < 0)) {
>                       count = 0;
>                       goto out;
> --
> 2.30.2

Reviewed-by: Chenbo Xia <chenbo....@intel.com>
