<20230626232307.97930-3-michael.chris...@oracle.com>
Subject: [PATCH RHEL9 COMMIT] ms/vhost: dynamically allocate vhost_worker

The commit is pushed to "branch-rh9-5.14.0-362.8.1.vz9.35.x-ovz" and will 
appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh9-5.14.0-362.8.1.vz9.35.5
------>
commit a25b680efbd9d290f6cce0190ef6e80c9663594d
Author: Mike Christie <michael.chris...@oracle.com>
Date:   Thu Jan 4 20:02:11 2024 +0300

    ms/vhost: dynamically allocate vhost_worker
    
    This patchset allows us to allocate multiple workers, so this has us
    move from the vhost_worker that's embedded in the vhost_dev to
    dynamically allocating it.
    
    Signed-off-by: Mike Christie <michael.chris...@oracle.com>
    Message-Id: <20230626232307.97930-3-michael.chris...@oracle.com>
    Signed-off-by: Michael S. Tsirkin <m...@redhat.com>
    
    =========
    Half of this commit is already present. Add the rest.
    
    (cherry picked from ms commit c011bb669ddc)
    https://virtuozzo.atlassian.net/browse/PSBM-152375
    https://virtuozzo.atlassian.net/browse/PSBM-139414
    Signed-off-by: Andrey Zhadchenko <andrey.zhadche...@virtuozzo.com>
    
    Feature: vhost-blk: in-kernel accelerator for virtio-blk guests
---
 drivers/vhost/vhost.c | 20 ++++++++++++--------
 drivers/vhost/vhost.h |  2 +-
 2 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 3421525c8e20..2dd032fbf642 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -235,21 +235,23 @@ void vhost_dev_flush(struct vhost_dev *dev)
 {
        struct vhost_flush_struct flush;
 
-       if (dev->worker) {
-               init_completion(&flush.wait_event);
-               vhost_work_init(&flush.work, vhost_flush_work);
+       init_completion(&flush.wait_event);
+       vhost_work_init(&flush.work, vhost_flush_work);
 
-               vhost_work_queue(dev, &flush.work);
+       if (vhost_work_queue(dev, &flush.work))
                wait_for_completion(&flush.wait_event);
-       }
 }
 EXPORT_SYMBOL_GPL(vhost_dev_flush);
 
-void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
+bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 {
        if (!dev->worker)
-               return;
-
+               return false;
+       /*
+        * vsock can queue while we do a VHOST_SET_OWNER, so we have a smp_wmb
+        * when setting up the worker. We don't have a smp_rmb here because
+        * test_and_set_bit gives us a mb already.
+        */
        if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
                /* We can only add the work to the list after we're
                 * sure it was not in the list.
@@ -258,6 +260,8 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
                llist_add(&work->node, &dev->worker->work_list);
                wake_up_process(dev->worker->task);
        }
+
+       return true;
 }
 EXPORT_SYMBOL_GPL(vhost_work_queue);
 
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 2df99e657ec6..9ff8c0afcead 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -44,7 +44,7 @@ struct vhost_poll {
 };
 
 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
-void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
+bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
 bool vhost_has_work(struct vhost_dev *dev);
 
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
_______________________________________________
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to