Subject: [PATCH RHEL9 COMMIT] ms/vhost: take worker or vq instead of dev for queueing
The commit is pushed to "branch-rh9-5.14.0-362.8.1.vz9.35.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh9-5.14.0-362.8.1.vz9.35.5
------>
commit db46389987fc830667ae3eba852011b55129e15b
Author: Mike Christie <michael.chris...@oracle.com>
Date:   Thu Jan 4 20:02:14 2024 +0300

    ms/vhost: take worker or vq instead of dev for queueing

    This patch has the core work queueing function take a worker for when
    we support multiple workers. It also adds a helper that takes a vq
    during queueing so modules can control which vq/worker to queue work
    on.

    This temp leaves vhost_work_queue. It will be removed when the drivers
    are converted in the next patches.

    Signed-off-by: Mike Christie <michael.chris...@oracle.com>
    Message-Id: <20230626232307.97930-6-michael.chris...@oracle.com>
    Signed-off-by: Michael S. Tsirkin <m...@redhat.com>

    ========
    (cherry picked from ms commit 0921dddcb589)
    https://virtuozzo.atlassian.net/browse/PSBM-152375
    https://virtuozzo.atlassian.net/browse/PSBM-139414
    Signed-off-by: Andrey Zhadchenko <andrey.zhadche...@virtuozzo.com>

    Feature: vhost-blk: in-kernel accelerator for virtio-blk guests
---
 drivers/vhost/vhost.c | 44 ++++++++++++++++++++++++++++----------------
 drivers/vhost/vhost.h |  1 +
 2 files changed, 29 insertions(+), 16 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index dc8592c411d9..f12e8ff91632 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -231,21 +231,10 @@ void vhost_poll_stop(struct vhost_poll *poll)
 }
 EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
-void vhost_dev_flush(struct vhost_dev *dev)
+static bool vhost_worker_queue(struct vhost_worker *worker,
+			       struct vhost_work *work)
 {
-	struct vhost_flush_struct flush;
-
-	init_completion(&flush.wait_event);
-	vhost_work_init(&flush.work, vhost_flush_work);
-
-	if (vhost_work_queue(dev, &flush.work))
-		wait_for_completion(&flush.wait_event);
-}
-EXPORT_SYMBOL_GPL(vhost_dev_flush);
-
-bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
-{
-	if (!dev->worker)
+	if (!worker)
 		return false;
 	/*
 	 * vsock can queue while we do a VHOST_SET_OWNER, so we have a smp_wmb
@@ -257,14 +246,37 @@ bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 		 * sure it was not in the list.
 		 * test_and_set_bit() implies a memory barrier.
 		 */
-		llist_add(&work->node, &dev->worker->work_list);
-		wake_up_process(dev->worker->task);
+		llist_add(&work->node, &worker->work_list);
+		wake_up_process(worker->task);
 	}
 
 	return true;
 }
+
+bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
+{
+	return vhost_worker_queue(dev->worker, work);
+}
 EXPORT_SYMBOL_GPL(vhost_work_queue);
 
+bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
+{
+	return vhost_worker_queue(vq->worker, work);
+}
+EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
+
+void vhost_dev_flush(struct vhost_dev *dev)
+{
+	struct vhost_flush_struct flush;
+
+	init_completion(&flush.wait_event);
+	vhost_work_init(&flush.work, vhost_flush_work);
+
+	if (vhost_work_queue(dev, &flush.work))
+		wait_for_completion(&flush.wait_event);
+}
+EXPORT_SYMBOL_GPL(vhost_dev_flush);
+
 /* A lockless hint for busy polling code to exit the loop */
 bool vhost_vq_has_work(struct vhost_virtqueue *vq)
 {
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index a76caf82027d..cd249b8d1861 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -198,6 +198,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
 		      struct vhost_log *log, unsigned int *log_num);
 void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
 
+bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
 bool vhost_vq_has_work(struct vhost_virtqueue *vq);
 bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
 int vhost_vq_init_access(struct vhost_virtqueue *);
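
For reviewers, a minimal usage sketch (not part of the patch) of how a backend
driver could use the new per-vq queueing path once its vqs have workers
assigned. All my_* names below are invented for illustration; only
vhost_work_init() and vhost_vq_work_queue() are the vhost core APIs this
patch touches.

/* Usage sketch only -- not part of this patch. */
#include "vhost.h"

struct my_backend {
	struct vhost_dev dev;
	struct vhost_virtqueue vq;
	struct vhost_work completion_work;
};

/* Runs on whichever vhost worker serves this backend's vq. */
static void my_completion_fn(struct vhost_work *work)
{
	struct my_backend *b = container_of(work, struct my_backend,
					    completion_work);

	/* ... handle completed requests for b->vq ... */
}

static void my_backend_setup(struct my_backend *b)
{
	vhost_work_init(&b->completion_work, my_completion_fn);
}

static void my_backend_complete(struct my_backend *b)
{
	/*
	 * Queue on the worker bound to this vq instead of passing the
	 * whole vhost_dev; returns false if no worker is set up yet.
	 */
	if (!vhost_vq_work_queue(&b->vq, &b->completion_work))
		pr_debug("vhost worker not ready\n");
}

The point of taking the vq (or worker) rather than the device is that once a
device has more than one worker, the caller selects which worker runs the
work simply by choosing which vq it queues on.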