On 09/05/2017 09:11 AM, Tiwei Bie wrote:
On Thu, Aug 31, 2017 at 11:50:10AM +0200, Maxime Coquelin wrote:
In order to be able to handle other ports or queues while waiting
for an IOTLB miss reply, a pending list is created so that the waiter
can return and retry later by sending the miss request again.
Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
---
lib/librte_vhost/iotlb.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++--
lib/librte_vhost/iotlb.h | 4 +++
lib/librte_vhost/vhost.h | 1 +
3 files changed, 91 insertions(+), 2 deletions(-)
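
[Editor's note: for context, the caller-side pattern this enables might look
roughly like the sketch below. The lookup helper and the miss-request sender
(vhost_user_iotlb_cache_find() and vhost_user_iotlb_miss()) are assumed names
for illustration, not something this hunk defines.]

/* Sketch of a caller: on a cache miss, register the pending entry and
 * send the miss request only once, then return so other ports/queues
 * can be handled; the caller retries after the IOTLB update arrives. */
static uint64_t
translate_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint8_t perm)
{
	uint64_t vva;

	vva = vhost_user_iotlb_cache_find(vq, iova, perm); /* assumed helper */
	if (vva)
		return vva;

	if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
		vhost_user_iotlb_pending_insert(vq, iova, perm);
		vhost_user_iotlb_miss(dev, iova, perm); /* assumed sender */
	}

	/* No translation yet: return 0 and retry once the reply lands. */
	return 0;
}
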
diff --git a/lib/librte_vhost/iotlb.c b/lib/librte_vhost/iotlb.c
index 1b739dae5..d014bfe98 100644
--- a/lib/librte_vhost/iotlb.c
+++ b/lib/librte_vhost/iotlb.c
@@ -49,7 +49,86 @@ struct vhost_iotlb_entry {
uint8_t perm;
};
-#define IOTLB_CACHE_SIZE 1024
+#define IOTLB_CACHE_SIZE 2048
+
+static void vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
+{
+ struct vhost_iotlb_entry *node, *temp_node;
+
+ rte_rwlock_write_lock(&vq->iotlb_lock);
+
+ TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
+ TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
+ rte_mempool_put(vq->iotlb_pool, node);
+ }
+
+ rte_rwlock_write_unlock(&vq->iotlb_lock);
+}
+
+int vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
+ uint8_t perm)
+{
+ struct vhost_iotlb_entry *node;
+ int found = 0;
+
The return value of this function is a boolean, so it's better
to return bool instead of int.
Fixed.
+ rte_rwlock_read_lock(&vq->iotlb_lock);
+
+ TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
+ if ((node->iova == iova) && (node->perm == perm)) {
+ found = 1;
+ break;
+ }
+ }
+
+ rte_rwlock_read_unlock(&vq->iotlb_lock);
+
+ return found;
+}
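
[Editor's note: for reference, the bool-returning variant suggested above
could look like this; just a sketch of the proposed change, assuming
<stdbool.h> is included in this file.]

bool
vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
		uint8_t perm)
{
	struct vhost_iotlb_entry *node;
	bool found = false;

	rte_rwlock_read_lock(&vq->iotlb_lock);

	TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
		if ((node->iova == iova) && (node->perm == perm)) {
			found = true;
			break;
		}
	}

	rte_rwlock_read_unlock(&vq->iotlb_lock);

	return found;
}
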
+
+void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
+ uint64_t iova, uint8_t perm)
+{
+ struct vhost_iotlb_entry *node;
+ int ret;
+
+ ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
+ if (ret) {
+ RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool empty, invalidate cache\n");
I think the log level should be INFO or the like, not ERR.
Fixed.
+ vhost_user_iotlb_pending_remove_all(vq);
+ ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
+ if (ret) {
+ RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool still empty, failure\n");
+ return;
+ }
+ }
+
+ node->iova = iova;
+ node->perm = perm;
+
+ rte_rwlock_write_lock(&vq->iotlb_lock);
+
+ TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);
+
+ rte_rwlock_write_unlock(&vq->iotlb_lock);
+}
+
+static void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t size, uint8_t perm)
+{
+ struct vhost_iotlb_entry *node, *temp_node;
+
+ /* .iotlb_lock already locked by the caller */
+ TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
+ if (node->iova < iova)
+ continue;
+ if (node->iova >= iova + size)
+ continue;
+ if ((node->perm & perm) != node->perm)
+ continue;
+ TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
+ rte_mempool_put(vq->iotlb_pool, node);
+ }
+}
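
[Editor's note: to illustrate the removal condition above: an entry is
dropped when its iova falls inside [iova, iova + size) and its requested
permissions are a subset of the granted ones. Below is a standalone sketch
of the predicate, assuming the usual VHOST_ACCESS_RO = 0x1 and
VHOST_ACCESS_RW = 0x3 bit values.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
pending_entry_matches(uint64_t entry_iova, uint8_t entry_perm,
		uint64_t iova, uint64_t size, uint8_t perm)
{
	if (entry_iova < iova || entry_iova >= iova + size)
		return false;
	/* The entry's perm bits must all be covered by the granted perm */
	return (entry_perm & perm) == entry_perm;
}

int
main(void)
{
	/* A read-only miss at 0x2000 is satisfied by an RW update
	 * covering [0x1000, 0x3000): 0x1 (RO) is a subset of 0x3 (RW). */
	printf("%d\n", pending_entry_matches(0x2000, 0x1, 0x1000, 0x2000, 0x3));
	return 0;
}
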
static void vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
{
@@ -106,7 +185,10 @@ void vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
unlock:
+ vhost_user_iotlb_pending_remove(vq, iova, size, perm);
+
rte_rwlock_write_unlock(&vq->iotlb_lock);
+
This empty line should be removed.
Yes, this part disappears in the next version, as I squashed patch 21
into patches 7 & 8.
Thanks,
Maxime
Best regards,
Tiwei Bie
}
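
[Editor's note: to tie the pieces together: when the IOTLB update arrives,
the message handler inserts the translation into each virtqueue's cache,
and since vhost_user_iotlb_cache_insert() now also purges matching pending
entries, the retried request can make progress. A hypothetical handler
sketch follows; the names handle_iotlb_update() and qva_to_vva() are
assumptions here, not part of this patch.]

static int
handle_iotlb_update(struct virtio_net *dev, struct vhost_iotlb_msg *imsg)
{
	uint64_t vva = qva_to_vva(dev, imsg->uaddr); /* assumed translation */
	uint32_t i;

	if (!vva)
		return -1;

	for (i = 0; i < dev->nr_vring; i++)
		vhost_user_iotlb_cache_insert(dev->virtqueue[i], imsg->iova,
				vva, imsg->size, imsg->perm);

	return 0;
}
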
void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,