Be a bit more strict when a programmatic error is detected with respect to the access_lock not being taken. Mark the new helper with __rte_assert_exclusive_lock so that clang understands where locks are expected to be taken.
Signed-off-by: David Marchand <david.march...@redhat.com> --- lib/vhost/vhost.c | 18 +++--------------- lib/vhost/vhost.h | 10 ++++++++++ lib/vhost/virtio_net.c | 6 +----- 3 files changed, 14 insertions(+), 20 deletions(-) diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c index 19c7b92c32..8cd727ca2f 100644 --- a/lib/vhost/vhost.c +++ b/lib/vhost/vhost.c @@ -1781,11 +1781,7 @@ rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id) if (unlikely(vq == NULL || !dev->async_copy)) return -1; - if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) { - VHOST_LOG_CONFIG(dev->ifname, ERR, "%s() called without access lock taken.\n", - __func__); - return -1; - } + vq_assert_lock(dev, vq); return async_channel_register(dev, vq); } @@ -1847,11 +1843,7 @@ rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id) if (vq == NULL) return -1; - if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) { - VHOST_LOG_CONFIG(dev->ifname, ERR, "%s() called without access lock taken.\n", - __func__); - return -1; - } + vq_assert_lock(dev, vq); if (!vq->async) return 0; @@ -1994,11 +1986,7 @@ rte_vhost_async_get_inflight_thread_unsafe(int vid, uint16_t queue_id) if (vq == NULL) return ret; - if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) { - VHOST_LOG_CONFIG(dev->ifname, ERR, "%s() called without access lock taken.\n", - __func__); - return -1; - } + vq_assert_lock(dev, vq); if (!vq->async) return ret; diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h index ef211ed519..f6b2930efd 100644 --- a/lib/vhost/vhost.h +++ b/lib/vhost/vhost.h @@ -512,6 +512,16 @@ struct virtio_net { struct rte_vhost_user_extern_ops extern_ops; } __rte_cache_aligned; +static inline void +vq_assert_lock__(struct virtio_net *dev, struct vhost_virtqueue *vq, const char *func) + __rte_assert_exclusive_lock(&vq->access_lock) +{ + if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) + rte_panic("VHOST_CONFIG: (%s) %s() called without access lock taken.\n", + 
dev->ifname, func); +} +#define vq_assert_lock(dev, vq) vq_assert_lock__(dev, vq, __func__) + static __rte_always_inline bool vq_is_packed(struct virtio_net *dev) { diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c index 9abf752f30..2a75cda7b6 100644 --- a/lib/vhost/virtio_net.c +++ b/lib/vhost/virtio_net.c @@ -2185,11 +2185,7 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id, vq = dev->virtqueue[queue_id]; - if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) { - VHOST_LOG_DATA(dev->ifname, ERR, "%s() called without access lock taken.\n", - __func__); - return -1; - } + vq_assert_lock(dev, vq); if (unlikely(!vq->async)) { VHOST_LOG_DATA(dev->ifname, ERR, -- 2.39.0