When operating on struct vhost_net_ubuf_ref, the following execution
sequence is theoretically possible:
CPU0 is finalizing DMA operation                   CPU1 is doing VHOST_NET_SET_BACKEND
                             // &ubufs->refcount == 2
vhost_net_ubuf_put()                               
vhost_net_ubuf_put_wait_and_free(oldubufs)
                                                     
vhost_net_ubuf_put_and_wait()
                                                       vhost_net_ubuf_put()
                                                         int r = atomic_sub_return(1, &ubufs->refcount);
                                                         // r = 1
int r = atomic_sub_return(1, &ubufs->refcount);
// r = 0
                                                      wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
                                                      // no wait occurs here because condition is already true
                                                    kfree(ubufs);
if (unlikely(!r))
  wake_up(&ubufs->wait);  // use-after-free

This leads to use-after-free on ubufs access. This happens because CPU1
skips waiting for wake_up() when refcount is already zero.

To prevent that, use a completion instead of a wait_queue as the ubufs
notification mechanism. wait_for_completion() guarantees that there will
be a complete() call prior to its return.

We also need to reinit the completion in vhost_net_flush(), because
refcnt == 0 does not mean the structure is being freed in that case.

Cc: sta...@vger.kernel.org
Fixes: 0ad8b480d6ee9 ("vhost: fix ref cnt checking deadlock")
Reported-by: Andrey Ryabinin <a...@yandex-team.com>
Suggested-by: Andrey Smetanin <asmeta...@yandex-team.ru>
Suggested-by: Hillf Danton <hdan...@sina.com>
Tested-by: Lei Yang <leiy...@redhat.com> (v1)
Signed-off-by: Nikolay Kuratov <k...@yandex-team.ru>
---
v2:
* move reinit_completion() into vhost_net_flush(), thanks
  to Hillf Danton
* add Tested-by: Lei Yang
* check that usages of put_and_wait() are consistent across
  LTS kernels

 drivers/vhost/net.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7cbfc7d718b3..69e1bfb9627e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -94,7 +94,7 @@ struct vhost_net_ubuf_ref {
         * >1: outstanding ubufs
         */
        atomic_t refcount;
-       wait_queue_head_t wait;
+       struct completion wait;
        struct vhost_virtqueue *vq;
 };
 
@@ -240,7 +240,7 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
        if (!ubufs)
                return ERR_PTR(-ENOMEM);
        atomic_set(&ubufs->refcount, 1);
-       init_waitqueue_head(&ubufs->wait);
+       init_completion(&ubufs->wait);
        ubufs->vq = vq;
        return ubufs;
 }
@@ -249,14 +249,14 @@ static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
 {
        int r = atomic_sub_return(1, &ubufs->refcount);
        if (unlikely(!r))
-               wake_up(&ubufs->wait);
+               complete_all(&ubufs->wait);
        return r;
 }
 
 static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 {
        vhost_net_ubuf_put(ubufs);
-       wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
+       wait_for_completion(&ubufs->wait);
 }
 
 static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
@@ -1381,6 +1381,7 @@ static void vhost_net_flush(struct vhost_net *n)
                mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                n->tx_flush = false;
                atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
+               reinit_completion(&n->vqs[VHOST_NET_VQ_TX].ubufs->wait);
                mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
        }
 }
-- 
2.34.1


Reply via email to