This patch simplifies the IOTLB implementation and reduces IOTLB
memory consumption by using a single IOTLB cache per device instead
of one per virtqueue.
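
As a condensed view of the resulting layout (a sketch only, the exact
fields are in the vhost.h hunk below), the IOTLB state now lives in
struct virtio_net, while each virtqueue keeps only its own lock:

    /* struct virtio_net: single, device-wide IOTLB cache */
    rte_rwlock_t    iotlb_pending_lock;
    struct vhost_iotlb_entry *iotlb_pool;
    TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
    TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
    int             iotlb_cache_nr;
    rte_spinlock_t  iotlb_free_lock;
    SLIST_HEAD(, vhost_iotlb_entry) iotlb_free_list;

    /* struct vhost_virtqueue: per-queue lock for datapath lookups */
    rte_rwlock_t    iotlb_lock;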

To avoid impacting performance, an IOTLB lock is kept per virtqueue,
so that multiple queues do not contend on a single shared lock.
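
Cache updates (insert, remove, evict, flush) therefore write-lock the
IOTLB lock of every virtqueue, while the datapath keeps read-locking
only its own queue's lock. A minimal sketch of the writer side,
matching the helper added in iotlb.h:

    static __rte_always_inline void
    vhost_user_iotlb_wr_lock_all(struct virtio_net *dev)
        __rte_no_thread_safety_analysis
    {
        uint32_t i;

        /* Take every virtqueue's IOTLB write lock before modifying
         * the shared, per-device cache.
         */
        for (i = 0; i < dev->nr_vring; i++)
            rte_rwlock_write_lock(&dev->virtqueue[i]->iotlb_lock);
    }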

Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
---
 lib/vhost/iotlb.c      | 212 +++++++++++++++++++----------------------
 lib/vhost/iotlb.h      |  43 ++++++---
 lib/vhost/vhost.c      |  18 ++--
 lib/vhost/vhost.h      |  16 ++--
 lib/vhost/vhost_user.c |  25 +++--
 5 files changed, 160 insertions(+), 154 deletions(-)

diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
index f598c0a8c4..a91115cf1c 100644
--- a/lib/vhost/iotlb.c
+++ b/lib/vhost/iotlb.c
@@ -74,86 +74,81 @@ vhost_user_iotlb_clear_dump(struct virtio_net *dev, struct vhost_iotlb_entry *no
 }
 
 static struct vhost_iotlb_entry *
-vhost_user_iotlb_pool_get(struct vhost_virtqueue *vq)
+vhost_user_iotlb_pool_get(struct virtio_net *dev)
 {
        struct vhost_iotlb_entry *node;
 
-       rte_spinlock_lock(&vq->iotlb_free_lock);
-       node = SLIST_FIRST(&vq->iotlb_free_list);
+       rte_spinlock_lock(&dev->iotlb_free_lock);
+       node = SLIST_FIRST(&dev->iotlb_free_list);
        if (node != NULL)
-               SLIST_REMOVE_HEAD(&vq->iotlb_free_list, next_free);
-       rte_spinlock_unlock(&vq->iotlb_free_lock);
+               SLIST_REMOVE_HEAD(&dev->iotlb_free_list, next_free);
+       rte_spinlock_unlock(&dev->iotlb_free_lock);
        return node;
 }
 
 static void
-vhost_user_iotlb_pool_put(struct vhost_virtqueue *vq,
-       struct vhost_iotlb_entry *node)
+vhost_user_iotlb_pool_put(struct virtio_net *dev, struct vhost_iotlb_entry *node)
 {
-       rte_spinlock_lock(&vq->iotlb_free_lock);
-       SLIST_INSERT_HEAD(&vq->iotlb_free_list, node, next_free);
-       rte_spinlock_unlock(&vq->iotlb_free_lock);
+       rte_spinlock_lock(&dev->iotlb_free_lock);
+       SLIST_INSERT_HEAD(&dev->iotlb_free_list, node, next_free);
+       rte_spinlock_unlock(&dev->iotlb_free_lock);
 }
 
 static void
-vhost_user_iotlb_cache_random_evict(struct virtio_net *dev, struct vhost_virtqueue *vq);
+vhost_user_iotlb_cache_random_evict(struct virtio_net *dev);
 
 static void
-vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
+vhost_user_iotlb_pending_remove_all(struct virtio_net *dev)
 {
        struct vhost_iotlb_entry *node, *temp_node;
 
-       rte_rwlock_write_lock(&vq->iotlb_pending_lock);
+       rte_rwlock_write_lock(&dev->iotlb_pending_lock);
 
-       RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
-               TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
-               vhost_user_iotlb_pool_put(vq, node);
+       RTE_TAILQ_FOREACH_SAFE(node, &dev->iotlb_pending_list, next, temp_node) {
+               TAILQ_REMOVE(&dev->iotlb_pending_list, node, next);
+               vhost_user_iotlb_pool_put(dev, node);
        }
 
-       rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
+       rte_rwlock_write_unlock(&dev->iotlb_pending_lock);
 }
 
 bool
-vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
-                               uint8_t perm)
+vhost_user_iotlb_pending_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
 {
        struct vhost_iotlb_entry *node;
        bool found = false;
 
-       rte_rwlock_read_lock(&vq->iotlb_pending_lock);
+       rte_rwlock_read_lock(&dev->iotlb_pending_lock);
 
-       TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
+       TAILQ_FOREACH(node, &dev->iotlb_pending_list, next) {
                if ((node->iova == iova) && (node->perm == perm)) {
                        found = true;
                        break;
                }
        }
 
-       rte_rwlock_read_unlock(&vq->iotlb_pending_lock);
+       rte_rwlock_read_unlock(&dev->iotlb_pending_lock);
 
        return found;
 }
 
 void
-vhost_user_iotlb_pending_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                               uint64_t iova, uint8_t perm)
+vhost_user_iotlb_pending_insert(struct virtio_net *dev, uint64_t iova, uint8_t perm)
 {
        struct vhost_iotlb_entry *node;
 
-       node = vhost_user_iotlb_pool_get(vq);
+       node = vhost_user_iotlb_pool_get(dev);
        if (node == NULL) {
                VHOST_LOG_CONFIG(dev->ifname, DEBUG,
-                       "IOTLB pool for vq %"PRIu32" empty, clear entries for 
pending insertion\n",
-                       vq->index);
-               if (!TAILQ_EMPTY(&vq->iotlb_pending_list))
-                       vhost_user_iotlb_pending_remove_all(vq);
+                       "IOTLB pool empty, clear entries for pending 
insertion\n");
+               if (!TAILQ_EMPTY(&dev->iotlb_pending_list))
+                       vhost_user_iotlb_pending_remove_all(dev);
                else
-                       vhost_user_iotlb_cache_random_evict(dev, vq);
-               node = vhost_user_iotlb_pool_get(vq);
+                       vhost_user_iotlb_cache_random_evict(dev);
+               node = vhost_user_iotlb_pool_get(dev);
                if (node == NULL) {
                        VHOST_LOG_CONFIG(dev->ifname, ERR,
-                               "IOTLB pool vq %"PRIu32" still empty, pending 
insertion failure\n",
-                               vq->index);
+                               "IOTLB pool still empty, pending insertion 
failure\n");
                        return;
                }
        }
@@ -161,22 +156,21 @@ vhost_user_iotlb_pending_insert(struct virtio_net *dev, struct vhost_virtqueue *
        node->iova = iova;
        node->perm = perm;
 
-       rte_rwlock_write_lock(&vq->iotlb_pending_lock);
+       rte_rwlock_write_lock(&dev->iotlb_pending_lock);
 
-       TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);
+       TAILQ_INSERT_TAIL(&dev->iotlb_pending_list, node, next);
 
-       rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
+       rte_rwlock_write_unlock(&dev->iotlb_pending_lock);
 }
 
 void
-vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
-                               uint64_t iova, uint64_t size, uint8_t perm)
+vhost_user_iotlb_pending_remove(struct virtio_net *dev, uint64_t iova, uint64_t size, uint8_t perm)
 {
        struct vhost_iotlb_entry *node, *temp_node;
 
-       rte_rwlock_write_lock(&vq->iotlb_pending_lock);
+       rte_rwlock_write_lock(&dev->iotlb_pending_lock);
 
-       RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next,
+       RTE_TAILQ_FOREACH_SAFE(node, &dev->iotlb_pending_list, next,
                                temp_node) {
                if (node->iova < iova)
                        continue;
@@ -184,81 +178,78 @@ vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
                        continue;
                if ((node->perm & perm) != node->perm)
                        continue;
-               TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
-               vhost_user_iotlb_pool_put(vq, node);
+               TAILQ_REMOVE(&dev->iotlb_pending_list, node, next);
+               vhost_user_iotlb_pool_put(dev, node);
        }
 
-       rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
+       rte_rwlock_write_unlock(&dev->iotlb_pending_lock);
 }
 
 static void
-vhost_user_iotlb_cache_remove_all(struct virtio_net *dev, struct vhost_virtqueue *vq)
+vhost_user_iotlb_cache_remove_all(struct virtio_net *dev)
 {
        struct vhost_iotlb_entry *node, *temp_node;
 
-       rte_rwlock_write_lock(&vq->iotlb_lock);
+       vhost_user_iotlb_wr_lock_all(dev);
 
-       RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
+       RTE_TAILQ_FOREACH_SAFE(node, &dev->iotlb_list, next, temp_node) {
                vhost_user_iotlb_set_dump(dev, node);
 
-               TAILQ_REMOVE(&vq->iotlb_list, node, next);
-               vhost_user_iotlb_pool_put(vq, node);
+               TAILQ_REMOVE(&dev->iotlb_list, node, next);
+               vhost_user_iotlb_pool_put(dev, node);
        }
 
-       vq->iotlb_cache_nr = 0;
+       dev->iotlb_cache_nr = 0;
 
-       rte_rwlock_write_unlock(&vq->iotlb_lock);
+       vhost_user_iotlb_wr_unlock_all(dev);
 }
 
 static void
-vhost_user_iotlb_cache_random_evict(struct virtio_net *dev, struct vhost_virtqueue *vq)
+vhost_user_iotlb_cache_random_evict(struct virtio_net *dev)
 {
        struct vhost_iotlb_entry *node, *temp_node, *prev_node = NULL;
        int entry_idx;
 
-       rte_rwlock_write_lock(&vq->iotlb_lock);
+       vhost_user_iotlb_wr_lock_all(dev);
 
-       entry_idx = rte_rand() % vq->iotlb_cache_nr;
+       entry_idx = rte_rand() % dev->iotlb_cache_nr;
 
-       RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
+       RTE_TAILQ_FOREACH_SAFE(node, &dev->iotlb_list, next, temp_node) {
                if (!entry_idx) {
                        struct vhost_iotlb_entry *next_node = RTE_TAILQ_NEXT(node, next);
 
                        vhost_user_iotlb_clear_dump(dev, node, prev_node, next_node);
 
-                       TAILQ_REMOVE(&vq->iotlb_list, node, next);
-                       vhost_user_iotlb_pool_put(vq, node);
-                       vq->iotlb_cache_nr--;
+                       TAILQ_REMOVE(&dev->iotlb_list, node, next);
+                       vhost_user_iotlb_pool_put(dev, node);
+                       dev->iotlb_cache_nr--;
                        break;
                }
                prev_node = node;
                entry_idx--;
        }
 
-       rte_rwlock_write_unlock(&vq->iotlb_lock);
+       vhost_user_iotlb_wr_unlock_all(dev);
 }
 
 void
-vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                               uint64_t iova, uint64_t uaddr,
+vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t uaddr,
                                uint64_t size, uint8_t perm)
 {
        struct vhost_iotlb_entry *node, *new_node;
 
-       new_node = vhost_user_iotlb_pool_get(vq);
+       new_node = vhost_user_iotlb_pool_get(dev);
        if (new_node == NULL) {
                VHOST_LOG_CONFIG(dev->ifname, DEBUG,
-                       "IOTLB pool vq %"PRIu32" empty, clear entries for cache 
insertion\n",
-                       vq->index);
-               if (!TAILQ_EMPTY(&vq->iotlb_list))
-                       vhost_user_iotlb_cache_random_evict(dev, vq);
+                       "IOTLB pool empty, clear entries for cache 
insertion\n");
+               if (!TAILQ_EMPTY(&dev->iotlb_list))
+                       vhost_user_iotlb_cache_random_evict(dev);
                else
-                       vhost_user_iotlb_pending_remove_all(vq);
-               new_node = vhost_user_iotlb_pool_get(vq);
+                       vhost_user_iotlb_pending_remove_all(dev);
+               new_node = vhost_user_iotlb_pool_get(dev);
                if (new_node == NULL) {
                        VHOST_LOG_CONFIG(dev->ifname, ERR,
-                               "IOTLB pool vq %"PRIu32" still empty, cache 
insertion failed\n",
-                               vq->index);
+                               "IOTLB pool still empty, cache insertion 
failed\n");
                        return;
                }
        }
@@ -268,49 +259,47 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq
        new_node->size = size;
        new_node->perm = perm;
 
-       rte_rwlock_write_lock(&vq->iotlb_lock);
+       vhost_user_iotlb_wr_lock_all(dev);
 
-       TAILQ_FOREACH(node, &vq->iotlb_list, next) {
+       TAILQ_FOREACH(node, &dev->iotlb_list, next) {
                /*
                 * Entries must be invalidated before being updated.
                 * So if iova already in list, assume identical.
                 */
                if (node->iova == new_node->iova) {
-                       vhost_user_iotlb_pool_put(vq, new_node);
+                       vhost_user_iotlb_pool_put(dev, new_node);
                        goto unlock;
                } else if (node->iova > new_node->iova) {
                        vhost_user_iotlb_set_dump(dev, new_node);
 
                        TAILQ_INSERT_BEFORE(node, new_node, next);
-                       vq->iotlb_cache_nr++;
+                       dev->iotlb_cache_nr++;
                        goto unlock;
                }
        }
 
        vhost_user_iotlb_set_dump(dev, new_node);
 
-       TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
-       vq->iotlb_cache_nr++;
+       TAILQ_INSERT_TAIL(&dev->iotlb_list, new_node, next);
+       dev->iotlb_cache_nr++;
 
 unlock:
-       vhost_user_iotlb_pending_remove(vq, iova, size, perm);
-
-       rte_rwlock_write_unlock(&vq->iotlb_lock);
+       vhost_user_iotlb_pending_remove(dev, iova, size, perm);
 
+       vhost_user_iotlb_wr_unlock_all(dev);
 }
 
 void
-vhost_user_iotlb_cache_remove(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                                       uint64_t iova, uint64_t size)
+vhost_user_iotlb_cache_remove(struct virtio_net *dev, uint64_t iova, uint64_t size)
 {
        struct vhost_iotlb_entry *node, *temp_node, *prev_node = NULL;
 
        if (unlikely(!size))
                return;
 
-       rte_rwlock_write_lock(&vq->iotlb_lock);
+       vhost_user_iotlb_wr_lock_all(dev);
 
-       RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
+       RTE_TAILQ_FOREACH_SAFE(node, &dev->iotlb_list, next, temp_node) {
                /* Sorted list */
                if (unlikely(iova + size < node->iova))
                        break;
@@ -320,19 +309,19 @@ vhost_user_iotlb_cache_remove(struct virtio_net *dev, struct vhost_virtqueue *vq
 
                        vhost_user_iotlb_clear_dump(dev, node, prev_node, next_node);
 
-                       TAILQ_REMOVE(&vq->iotlb_list, node, next);
-                       vhost_user_iotlb_pool_put(vq, node);
-                       vq->iotlb_cache_nr--;
-               } else
+                       TAILQ_REMOVE(&dev->iotlb_list, node, next);
+                       vhost_user_iotlb_pool_put(dev, node);
+                       dev->iotlb_cache_nr--;
+               } else {
                        prev_node = node;
+               }
        }
 
-       rte_rwlock_write_unlock(&vq->iotlb_lock);
+       vhost_user_iotlb_wr_unlock_all(dev);
 }
 
 uint64_t
-vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
-                                               uint64_t *size, uint8_t perm)
+vhost_user_iotlb_cache_find(struct virtio_net *dev, uint64_t iova, uint64_t *size, uint8_t perm)
 {
        struct vhost_iotlb_entry *node;
        uint64_t offset, vva = 0, mapped = 0;
@@ -340,7 +329,7 @@ vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
        if (unlikely(!*size))
                goto out;
 
-       TAILQ_FOREACH(node, &vq->iotlb_list, next) {
+       TAILQ_FOREACH(node, &dev->iotlb_list, next) {
                /* List sorted by iova */
                if (unlikely(iova < node->iova))
                        break;
@@ -373,60 +362,57 @@ vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
 }
 
 void
-vhost_user_iotlb_flush_all(struct virtio_net *dev, struct vhost_virtqueue *vq)
+vhost_user_iotlb_flush_all(struct virtio_net *dev)
 {
-       vhost_user_iotlb_cache_remove_all(dev, vq);
-       vhost_user_iotlb_pending_remove_all(vq);
+       vhost_user_iotlb_cache_remove_all(dev);
+       vhost_user_iotlb_pending_remove_all(dev);
 }
 
 int
-vhost_user_iotlb_init(struct virtio_net *dev, struct vhost_virtqueue *vq)
+vhost_user_iotlb_init(struct virtio_net *dev)
 {
        unsigned int i;
        int socket = 0;
 
-       if (vq->iotlb_pool) {
+       if (dev->iotlb_pool) {
                /*
                 * The cache has already been initialized,
                 * just drop all cached and pending entries.
                 */
-               vhost_user_iotlb_flush_all(dev, vq);
-               rte_free(vq->iotlb_pool);
+               vhost_user_iotlb_flush_all(dev);
+               rte_free(dev->iotlb_pool);
        }
 
 #ifdef RTE_LIBRTE_VHOST_NUMA
-       if (get_mempolicy(&socket, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR) != 0)
+       if (get_mempolicy(&socket, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR) != 0)
                socket = 0;
 #endif
 
-       rte_spinlock_init(&vq->iotlb_free_lock);
-       rte_rwlock_init(&vq->iotlb_lock);
-       rte_rwlock_init(&vq->iotlb_pending_lock);
+       rte_spinlock_init(&dev->iotlb_free_lock);
+       rte_rwlock_init(&dev->iotlb_pending_lock);
 
-       SLIST_INIT(&vq->iotlb_free_list);
-       TAILQ_INIT(&vq->iotlb_list);
-       TAILQ_INIT(&vq->iotlb_pending_list);
+       SLIST_INIT(&dev->iotlb_free_list);
+       TAILQ_INIT(&dev->iotlb_list);
+       TAILQ_INIT(&dev->iotlb_pending_list);
 
        if (dev->flags & VIRTIO_DEV_SUPPORT_IOMMU) {
-               vq->iotlb_pool = rte_calloc_socket("iotlb", IOTLB_CACHE_SIZE,
+               dev->iotlb_pool = rte_calloc_socket("iotlb", IOTLB_CACHE_SIZE,
                        sizeof(struct vhost_iotlb_entry), 0, socket);
-               if (!vq->iotlb_pool) {
-                       VHOST_LOG_CONFIG(dev->ifname, ERR,
-                               "Failed to create IOTLB cache pool for vq 
%"PRIu32"\n",
-                               vq->index);
+               if (!dev->iotlb_pool) {
+                       VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to create 
IOTLB cache pool\n");
                        return -1;
                }
                for (i = 0; i < IOTLB_CACHE_SIZE; i++)
-                       vhost_user_iotlb_pool_put(vq, &vq->iotlb_pool[i]);
+                       vhost_user_iotlb_pool_put(dev, &dev->iotlb_pool[i]);
        }
 
-       vq->iotlb_cache_nr = 0;
+       dev->iotlb_cache_nr = 0;
 
        return 0;
 }
 
 void
-vhost_user_iotlb_destroy(struct vhost_virtqueue *vq)
+vhost_user_iotlb_destroy(struct virtio_net *dev)
 {
-       rte_free(vq->iotlb_pool);
+       rte_free(dev->iotlb_pool);
 }
diff --git a/lib/vhost/iotlb.h b/lib/vhost/iotlb.h
index 73b5465b41..3490b9e6be 100644
--- a/lib/vhost/iotlb.h
+++ b/lib/vhost/iotlb.h
@@ -37,20 +37,37 @@ vhost_user_iotlb_wr_unlock(struct vhost_virtqueue *vq)
        rte_rwlock_write_unlock(&vq->iotlb_lock);
 }
 
-void vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                                       uint64_t iova, uint64_t uaddr,
+static __rte_always_inline void
+vhost_user_iotlb_wr_lock_all(struct virtio_net *dev)
+       __rte_no_thread_safety_analysis
+{
+       uint32_t i;
+
+       for (i = 0; i < dev->nr_vring; i++)
+               rte_rwlock_write_lock(&dev->virtqueue[i]->iotlb_lock);
+}
+
+static __rte_always_inline void
+vhost_user_iotlb_wr_unlock_all(struct virtio_net *dev)
+       __rte_no_thread_safety_analysis
+{
+       uint32_t i;
+
+       for (i = 0; i < dev->nr_vring; i++)
+               rte_rwlock_write_unlock(&dev->virtqueue[i]->iotlb_lock);
+}
+
+void vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t uaddr,
                                        uint64_t size, uint8_t perm);
-void vhost_user_iotlb_cache_remove(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                                       uint64_t iova, uint64_t size);
-uint64_t vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
+void vhost_user_iotlb_cache_remove(struct virtio_net *dev, uint64_t iova, uint64_t size);
+uint64_t vhost_user_iotlb_cache_find(struct virtio_net *dev, uint64_t iova,
                                        uint64_t *size, uint8_t perm);
-bool vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
-                                               uint8_t perm);
-void vhost_user_iotlb_pending_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                                               uint64_t iova, uint8_t perm);
-void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq, uint64_t iova,
+bool vhost_user_iotlb_pending_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm);
+void vhost_user_iotlb_pending_insert(struct virtio_net *dev, uint64_t iova, uint8_t perm);
+void vhost_user_iotlb_pending_remove(struct virtio_net *dev, uint64_t iova,
                                                uint64_t size, uint8_t perm);
-void vhost_user_iotlb_flush_all(struct virtio_net *dev, struct vhost_virtqueue *vq);
-int vhost_user_iotlb_init(struct virtio_net *dev, struct vhost_virtqueue *vq);
-void vhost_user_iotlb_destroy(struct vhost_virtqueue *vq);
+void vhost_user_iotlb_flush_all(struct virtio_net *dev);
+int vhost_user_iotlb_init(struct virtio_net *dev);
+void vhost_user_iotlb_destroy(struct virtio_net *dev);
+
 #endif /* _VHOST_IOTLB_H_ */
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index ef37943817..d35075b96c 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -63,7 +63,7 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
        tmp_size = *size;
 
-       vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
+       vva = vhost_user_iotlb_cache_find(dev, iova, &tmp_size, perm);
        if (tmp_size == *size) {
                if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
                        vq->stats.iotlb_hits++;
@@ -75,7 +75,7 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
        iova += tmp_size;
 
-       if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
+       if (!vhost_user_iotlb_pending_miss(dev, iova, perm)) {
                /*
                 * iotlb_lock is read-locked for a full burst,
                 * but it only protects the iotlb cache.
@@ -85,12 +85,12 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                 */
                vhost_user_iotlb_rd_unlock(vq);
 
-               vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
+               vhost_user_iotlb_pending_insert(dev, iova, perm);
                if (vhost_user_iotlb_miss(dev, iova, perm)) {
                        VHOST_LOG_DATA(dev->ifname, ERR,
                                "IOTLB miss req failed for IOVA 0x%" PRIx64 
"\n",
                                iova);
-                       vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
+                       vhost_user_iotlb_pending_remove(dev, iova, 1, perm);
                }
 
                vhost_user_iotlb_rd_lock(vq);
@@ -397,7 +397,6 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
        vhost_free_async_mem(vq);
        rte_spinlock_unlock(&vq->access_lock);
        rte_free(vq->batch_copy_elems);
-       vhost_user_iotlb_destroy(vq);
        rte_free(vq->log_cache);
        rte_free(vq);
 }
@@ -575,7 +574,7 @@ vring_invalidate(struct virtio_net *dev __rte_unused, struct vhost_virtqueue *vq
 }
 
 static void
-init_vring_queue(struct virtio_net *dev, struct vhost_virtqueue *vq,
+init_vring_queue(struct virtio_net *dev __rte_unused, struct vhost_virtqueue *vq,
        uint32_t vring_idx)
 {
        int numa_node = SOCKET_ID_ANY;
@@ -595,8 +594,6 @@ init_vring_queue(struct virtio_net *dev, struct vhost_virtqueue *vq,
        }
 #endif
        vq->numa_node = numa_node;
-
-       vhost_user_iotlb_init(dev, vq);
 }
 
 static void
@@ -631,6 +628,7 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
                dev->virtqueue[i] = vq;
                init_vring_queue(dev, vq, i);
                rte_spinlock_init(&vq->access_lock);
+               rte_rwlock_init(&vq->iotlb_lock);
                vq->avail_wrap_counter = 1;
                vq->used_wrap_counter = 1;
                vq->signalled_used_valid = false;
@@ -795,6 +793,10 @@ vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags, bool stats
                dev->flags |= VIRTIO_DEV_SUPPORT_IOMMU;
        else
                dev->flags &= ~VIRTIO_DEV_SUPPORT_IOMMU;
+
+       if (vhost_user_iotlb_init(dev) < 0)
+               VHOST_LOG_CONFIG("device", ERR, "failed to init IOTLB\n");
+
 }
 
 void
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 40863f7bfd..67cc4a2fdb 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -302,13 +302,6 @@ struct vhost_virtqueue {
        struct log_cache_entry  *log_cache;
 
        rte_rwlock_t    iotlb_lock;
-       rte_rwlock_t    iotlb_pending_lock;
-       struct vhost_iotlb_entry *iotlb_pool;
-       TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
-       TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
-       int                             iotlb_cache_nr;
-       rte_spinlock_t  iotlb_free_lock;
-       SLIST_HEAD(, vhost_iotlb_entry) iotlb_free_list;
 
        /* Used to notify the guest (trigger interrupt) */
        int                     callfd;
@@ -483,6 +476,15 @@ struct virtio_net {
        int                     extbuf;
        int                     linearbuf;
        struct vhost_virtqueue  *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
+
+       rte_rwlock_t    iotlb_pending_lock;
+       struct vhost_iotlb_entry *iotlb_pool;
+       TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
+       TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
+       int                             iotlb_cache_nr;
+       rte_spinlock_t  iotlb_free_lock;
+       SLIST_HEAD(, vhost_iotlb_entry) iotlb_free_list;
+
        struct inflight_mem_info *inflight_info;
 #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
        char                    ifname[IF_NAME_SZ];
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index d60e39b6bc..81ebef0137 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -7,7 +7,7 @@
  * The vhost-user protocol connection is an external interface, so it must be
  * robust against invalid inputs.
  *
- * This is important because the vhost-user frontend is only one step removed
+* This is important because the vhost-user frontend is only one step removed
  * from the guest.  Malicious guests that have escaped will then launch further
  * attacks from the vhost-user frontend.
  *
@@ -237,6 +237,8 @@ vhost_backend_cleanup(struct virtio_net *dev)
        }
 
        dev->postcopy_listening = 0;
+
+       vhost_user_iotlb_destroy(dev);
 }
 
 static void
@@ -539,7 +541,6 @@ numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
        if (vq != dev->virtqueue[vq->index]) {
                VHOST_LOG_CONFIG(dev->ifname, INFO, "reallocated virtqueue on 
node %d\n", node);
                dev->virtqueue[vq->index] = vq;
-               vhost_user_iotlb_init(dev, vq);
        }
 
        if (vq_is_packed(dev)) {
@@ -664,6 +665,8 @@ numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
                return;
        }
        dev->guest_pages = gp;
+
+       vhost_user_iotlb_init(dev);
 }
 #else
 static void
@@ -1360,8 +1363,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
 
                /* Flush IOTLB cache as previous HVAs are now invalid */
                if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
-                       for (i = 0; i < dev->nr_vring; i++)
-                               vhost_user_iotlb_flush_all(dev, dev->virtqueue[i]);
+                       vhost_user_iotlb_flush_all(dev);
 
                free_mem_region(dev);
                rte_free(dev->mem);
@@ -2194,7 +2196,7 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
        ctx->msg.size = sizeof(ctx->msg.payload.state);
        ctx->fd_num = 0;
 
-       vhost_user_iotlb_flush_all(dev, vq);
+       vhost_user_iotlb_flush_all(dev);
 
        vring_invalidate(dev, vq);
 
@@ -2639,15 +2641,14 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,
                if (!vva)
                        return RTE_VHOST_MSG_RESULT_ERR;
 
+               vhost_user_iotlb_cache_insert(dev, imsg->iova, vva, len, imsg->perm);
+
                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];
 
                        if (!vq)
                                continue;
 
-                       vhost_user_iotlb_cache_insert(dev, vq, imsg->iova, vva,
-                                       len, imsg->perm);
-
                        if (is_vring_iotlb(dev, vq, imsg)) {
                                rte_spinlock_lock(&vq->access_lock);
                                translate_ring_addresses(&dev, &vq);
@@ -2657,15 +2658,14 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,
                }
                break;
        case VHOST_IOTLB_INVALIDATE:
+               vhost_user_iotlb_cache_remove(dev, imsg->iova, imsg->size);
+
                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];
 
                        if (!vq)
                                continue;
 
-                       vhost_user_iotlb_cache_remove(dev, vq, imsg->iova,
-                                       imsg->size);
-
                        if (is_vring_iotlb(dev, vq, imsg)) {
                                rte_spinlock_lock(&vq->access_lock);
                                vring_invalidate(dev, vq);
@@ -2674,8 +2674,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,
                }
                break;
        default:
-               VHOST_LOG_CONFIG(dev->ifname, ERR,
-                       "invalid IOTLB message type (%d)\n",
+               VHOST_LOG_CONFIG(dev->ifname, ERR, "invalid IOTLB message type 
(%d)\n",
                        imsg->type);
                return RTE_VHOST_MSG_RESULT_ERR;
        }
-- 
2.39.2
