Allow userspace to change the vq-to-worker mapping while the vq is in
use, so tools can do this setup after device creation if needed. To make
that safe, the vq's worker pointer is now RCU protected: work queueing,
flushing and the busy-poll hint read it under rcu_read_lock(), and
attaching a new worker swaps the pointer, waits an RCU grace period,
then flushes the old worker before dropping its reference.

Signed-off-by: Mike Christie <michael.chris...@oracle.com>
---
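
Illustrative usage (notes only, not part of the commit): with the
-EBUSY check on active vqs removed, a tool can create a worker and
rebind a vq at runtime roughly as sketched below. The
struct vhost_vring_worker field names (index, worker_id) and the
VHOST_NEW_WORKER calling convention are assumptions based on the
earlier patches in this series.

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Move a (possibly active) vq onto a freshly created worker. */
static int move_vq_to_new_worker(int vhost_fd, unsigned int vq_index)
{
	struct vhost_vring_worker w = { .index = vq_index };

	/* Create a new worker; the kernel fills in w.worker_id. */
	if (ioctl(vhost_fd, VHOST_NEW_WORKER, &w) < 0)
		return -1;

	/*
	 * Attach it to the vq. Before this patch this failed with
	 * -EBUSY on an active vq; now the old worker is swapped out
	 * under RCU and flushed before being released.
	 */
	return ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &w);
}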

 drivers/vhost/vhost.c      | 102 +++++++++++++++++++++++++------------
 drivers/vhost/vhost.h      |   2 +-
 include/uapi/linux/vhost.h |   2 +-
 3 files changed, 71 insertions(+), 35 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 20bf67a846f1..f47710a77853 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -232,12 +232,9 @@ void vhost_poll_stop(struct vhost_poll *poll)
 }
 EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
-static void vhost_work_queue_on(struct vhost_worker *worker,
-                               struct vhost_work *work)
+static void vhost_worker_work_queue(struct vhost_worker *worker,
+                                   struct vhost_work *work)
 {
-       if (!worker)
-               return;
-
        if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
                /* We can only add the work to the list after we're
                 * sure it was not in the list.
@@ -248,31 +245,45 @@ static void vhost_work_queue_on(struct vhost_worker *worker,
        }
 }
 
-static void vhost_work_flush_on(struct vhost_worker *worker)
+void vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
 {
-       struct vhost_flush_struct flush;
+       struct vhost_worker *worker;
 
-       if (!worker)
-               return;
+       rcu_read_lock();
+       worker = rcu_dereference(vq->worker);
+       if (worker)
+               vhost_worker_work_queue(worker, work);
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
 
-       init_completion(&flush.wait_event);
-       vhost_work_init(&flush.work, vhost_flush_work);
+static void vhost_worker_flush_queue(struct vhost_worker *worker,
+                                    struct vhost_flush_struct *flush)
+{
+       init_completion(&flush->wait_event);
+       vhost_work_init(&flush->work, vhost_flush_work);
 
-       vhost_work_queue_on(worker, &flush.work);
-       wait_for_completion(&flush.wait_event);
+       vhost_worker_work_queue(worker, &flush->work);
 }
 
 void vhost_vq_work_flush(struct vhost_virtqueue *vq)
 {
-       vhost_work_flush_on(vq->worker);
-}
-EXPORT_SYMBOL_GPL(vhost_vq_work_flush);
+       struct vhost_flush_struct flush;
+       struct vhost_worker *worker;
 
-void vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
-{
-       vhost_work_queue_on(vq->worker, work);
+       rcu_read_lock();
+       worker = rcu_dereference(vq->worker);
+       if (!worker) {
+               rcu_read_unlock();
+               return;
+       }
+
+       vhost_worker_flush_queue(worker, &flush);
+       rcu_read_unlock();
+
+       wait_for_completion(&flush.wait_event);
 }
-EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
+EXPORT_SYMBOL_GPL(vhost_vq_work_flush);
 
 /* Flush any work that has been scheduled. When calling this, don't hold any
  * locks that are also used by the callback. */
@@ -285,7 +296,16 @@ EXPORT_SYMBOL_GPL(vhost_poll_flush);
 /* A lockless hint for busy polling code to exit the loop */
 bool vhost_vq_has_work(struct vhost_virtqueue *vq)
 {
-       return vq->worker && !llist_empty(&vq->worker->work_list);
+       struct vhost_worker *worker;
+       bool has_work = false;
+
+       rcu_read_lock();
+       worker = rcu_dereference(vq->worker);
+       if (worker && !llist_empty(&worker->work_list))
+               has_work = true;
+       rcu_read_unlock();
+
+       return has_work;
 }
 EXPORT_SYMBOL_GPL(vhost_vq_has_work);
 
@@ -510,7 +530,7 @@ void vhost_dev_init(struct vhost_dev *dev,
                vq->log = NULL;
                vq->indirect = NULL;
                vq->heads = NULL;
-               vq->worker = NULL;
+               rcu_assign_pointer(vq->worker, NULL);
                vq->dev = dev;
                mutex_init(&vq->mutex);
                vhost_vq_reset(dev, vq);
@@ -590,11 +610,32 @@ static void vhost_worker_put(struct vhost_dev *dev, struct vhost_worker *worker)
        kfree(worker);
 }
 
-static void vhost_vq_detach_worker(struct vhost_virtqueue *vq)
+static void vhost_vq_swap_worker(struct vhost_virtqueue *vq,
+                                struct vhost_worker *new_worker, bool flush)
 {
-       if (vq->worker)
-               vhost_worker_put(vq->dev, vq->worker);
-       vq->worker = NULL;
+       struct vhost_flush_struct flush_work;
+       struct vhost_worker *old_worker;
+
+       old_worker = rcu_dereference_check(vq->worker,
+                                          lockdep_is_held(&vq->dev->mutex));
+       rcu_assign_pointer(vq->worker, new_worker);
+
+       if (!old_worker)
+               return;
+
+       if (flush) {
+               /*
+                * For dev cleanup no work will be running, but for the
+                * dynamic attach case there may be, so make sure callers
+                * see the new worker and the old worker's queue drains.
+                */
+               synchronize_rcu();
+
+               vhost_worker_flush_queue(old_worker, &flush_work);
+               wait_for_completion(&flush_work.wait_event);
+       }
+
+       vhost_worker_put(vq->dev, old_worker);
 }
 
 static int vhost_workers_idr_iter(int id, void *worker, void *dev)
@@ -611,7 +652,7 @@ static void vhost_workers_free(struct vhost_dev *dev)
                return;
 
        for (i = 0; i < dev->nvqs; i++)
-               vhost_vq_detach_worker(dev->vqs[i]);
+               vhost_vq_swap_worker(dev->vqs[i], NULL, false);
 
        idr_for_each(&dev->worker_idr, vhost_workers_idr_iter, dev);
 }
@@ -667,18 +708,13 @@ static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
        if (!dev->use_worker)
                return -EINVAL;
 
-       /* We don't support setting a worker on an active vq */
-       if (vq->private_data)
-               return -EBUSY;
-
        worker = idr_find(&dev->worker_idr, info->worker_id);
        if (!worker)
                return -ENODEV;
 
        refcount_inc(&worker->refcount);
 
-       vhost_vq_detach_worker(vq);
-       vq->worker = worker;
+       vhost_vq_swap_worker(vq, worker, true);
        return 0;
 }
 
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b3786e3537f1..607e95887942 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -81,7 +81,7 @@ struct vhost_vring_call {
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
        struct vhost_dev *dev;
-       struct vhost_worker *worker;
+       struct vhost_worker __rcu *worker;
 
        /* The actual ring of buffers. */
        struct mutex mutex;
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 117ea92b3925..e0221c8ce877 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -88,7 +88,7 @@
 #define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
 #define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
 /* Attach a vhost_worker created with VHOST_NEW_WORKER to one of the device's
- * virtqueues. This must be done before the virtqueue is active.
+ * virtqueues.
  */
 #define VHOST_ATTACH_VRING_WORKER _IOR(VHOST_VIRTIO, 0x15,             \
                                       struct vhost_vring_worker)
-- 
2.25.1
