In order to be able to handle other ports or queues while waiting
for an IOTLB miss reply, a pending list is created so that the waiter
can return and retry later by sending the miss request again.

Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
---
 lib/librte_vhost/iotlb.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++-
 lib/librte_vhost/iotlb.h |  6 +++
 lib/librte_vhost/vhost.h |  4 +-
 3 files changed, 104 insertions(+), 3 deletions(-)

diff --git a/lib/librte_vhost/iotlb.c b/lib/librte_vhost/iotlb.c
index fcfdd25d7..066c37a73 100644
--- a/lib/librte_vhost/iotlb.c
+++ b/lib/librte_vhost/iotlb.c
@@ -48,7 +48,94 @@ struct vhost_iotlb_entry {
        uint8_t perm;
 };
 
-#define IOTLB_CACHE_SIZE 1024
+#define IOTLB_CACHE_SIZE 2048
+
+static void
+vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
+{
+       struct vhost_iotlb_entry *node, *temp_node;
+
+       rte_rwlock_write_lock(&vq->iotlb_pending_lock);
+
+       TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
+               TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
+               rte_mempool_put(vq->iotlb_pool, node);
+       }
+
+       rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
+}
+
+bool
+vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
+                               uint8_t perm)
+{
+       struct vhost_iotlb_entry *node;
+       bool found = false;
+
+       rte_rwlock_read_lock(&vq->iotlb_pending_lock);
+
+       TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
+               if ((node->iova == iova) && (node->perm == perm)) {
+                       found = true;
+                       break;
+               }
+       }
+
+       rte_rwlock_read_unlock(&vq->iotlb_pending_lock);
+
+       return found;
+}
+
+void
+vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
+                               uint64_t iova, uint8_t perm)
+{
+       struct vhost_iotlb_entry *node;
+       int ret;
+
+       ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
+       if (ret) {
+               RTE_LOG(INFO, VHOST_CONFIG,
+                               "IOTLB pool empty, clear pending misses\n");
+               vhost_user_iotlb_pending_remove_all(vq);
+               ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
+               if (ret) {
+                       RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool still empty, failure\n");
+                       return;
+               }
+       }
+
+       node->iova = iova;
+       node->perm = perm;
+
+       rte_rwlock_write_lock(&vq->iotlb_pending_lock);
+
+       TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);
+
+       rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
+}
+
+static void
+vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
+                               uint64_t iova, uint64_t size, uint8_t perm)
+{
+       struct vhost_iotlb_entry *node, *temp_node;
+
+       rte_rwlock_write_lock(&vq->iotlb_pending_lock);
+
+       TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
+               if (node->iova < iova)
+                       continue;
+               if (node->iova >= iova + size)
+                       continue;
+               if ((node->perm & perm) != node->perm)
+                       continue;
+               TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
+               rte_mempool_put(vq->iotlb_pool, node);
+       }
+
+       rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
+}
 
 static void
 vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
@@ -134,7 +221,10 @@ vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
        vq->iotlb_cache_nr++;
 
 unlock:
+       vhost_user_iotlb_pending_remove(vq, iova, size, perm);
+
        rte_rwlock_write_unlock(&vq->iotlb_lock);
+
 }
 
 void
@@ -215,9 +305,10 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
        if (vq->iotlb_pool) {
                /*
                 * The cache has already been initialized,
-                * just drop all entries
+                * just drop all cached and pending entries.
                 */
                vhost_user_iotlb_cache_remove_all(vq);
+               vhost_user_iotlb_pending_remove_all(vq);
                return 0;
        }
 
@@ -228,8 +319,10 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
                socket = 0;
 
        rte_rwlock_init(&vq->iotlb_lock);
+       rte_rwlock_init(&vq->iotlb_pending_lock);
 
        TAILQ_INIT(&vq->iotlb_list);
+       TAILQ_INIT(&vq->iotlb_pending_list);
 
        snprintf(pool_name, sizeof(pool_name), "iotlb_cache_%d_%d",
                        dev->vid, vq_index);
diff --git a/lib/librte_vhost/iotlb.h b/lib/librte_vhost/iotlb.h
index 27b2d6b30..f1a050e44 100644
--- a/lib/librte_vhost/iotlb.h
+++ b/lib/librte_vhost/iotlb.h
@@ -32,6 +32,8 @@
 #ifndef _VHOST_IOTLB_H_
 #define _VHOST_IOTLB_H_
 
+#include <stdbool.h>
+
 #include "vhost.h"
 
 static __rte_always_inline void
@@ -65,6 +67,10 @@ void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
                                        uint64_t iova, uint64_t size);
 uint64_t vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
                                        uint64_t *size, uint8_t perm);
+bool vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
+                                               uint8_t perm);
+void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq, uint64_t iova,
+                                               uint8_t perm);
 int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index);
 
 #endif /* _VHOST_IOTLB_H_ */
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 09a00186f..8131bef9c 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -130,9 +130,11 @@ struct vhost_virtqueue {
        uint16_t                batch_copy_nb_elems;
 
        rte_rwlock_t    iotlb_lock;
+       rte_rwlock_t    iotlb_pending_lock;
        struct rte_mempool *iotlb_pool;
        TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
-       int                             iotlb_cache_nr;
+       int                             iotlb_cache_nr;
+       TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
 } __rte_cache_aligned;
 
 /* Old kernels have no such macros defined */
-- 
2.13.5

Reply via email to