This patch introduces virtqueue ops, a set of callbacks that are invoked according to the queue layout or negotiated features. This avoids branching on split/packed at every call site and will ease future implementations such as in-order.
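For illustration only, here is a minimal stand-alone sketch of the same ops-table dispatch outside the kernel: the ring picks its table once at creation time, and every later call is a single indirect call instead of an if (packed) / else branch. All names below (struct ring, more_used_*) are made up for the example and are not this patch's identifiers.

/*
 * Minimal sketch of ops-table dispatch: one indirect call replaces a
 * layout branch at every call site.  Illustrative names only.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring;

struct ring_ops {
	bool (*more_used)(const struct ring *r);
};

struct ring {
	const struct ring_ops *ops;	/* chosen once at creation */
	unsigned int last_used;
	unsigned int used_idx;
};

static bool more_used_split(const struct ring *r)
{
	return r->last_used != r->used_idx;
}

static bool more_used_packed(const struct ring *r)
{
	/* a real packed ring would check descriptor flags instead */
	return r->last_used != r->used_idx;
}

static const struct ring_ops split_ops  = { .more_used = more_used_split };
static const struct ring_ops packed_ops = { .more_used = more_used_packed };

int main(void)
{
	struct ring r = { .ops = &split_ops, .used_idx = 1 };

	/* callers no longer branch on the ring layout */
	printf("more used: %d\n", r.ops->more_used(&r));

	r.ops = &packed_ops;	/* switching layouts touches one pointer */
	printf("more used: %d\n", r.ops->more_used(&r));
	return 0;
}

A hypothetical in-order backend would slot in the same way: define a third ops table and point the ops pointer at it during creation, with no change to the common entry points.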
Signed-off-by: Jason Wang <jasow...@redhat.com>
---
 drivers/virtio/virtio_ring.c | 96 +++++++++++++++++++++++++-----------
 1 file changed, 67 insertions(+), 29 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index a2884eae14d9..ce1dc90ee89d 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -159,9 +159,30 @@ struct vring_virtqueue_packed {
 	size_t event_size_in_bytes;
 };
 
+struct vring_virtqueue;
+
+struct virtqueue_ops {
+	int (*add)(struct vring_virtqueue *_vq, struct scatterlist *sgs[],
+		   unsigned int total_sg, unsigned int out_sgs,
+		   unsigned int in_sgs, void *data,
+		   void *ctx, bool premapped, gfp_t gfp);
+	void *(*get)(struct vring_virtqueue *vq, unsigned int *len, void **ctx);
+	bool (*kick_prepare)(struct vring_virtqueue *vq);
+	void (*disable_cb)(struct vring_virtqueue *vq);
+	bool (*enable_cb_delayed)(struct vring_virtqueue *vq);
+	unsigned int (*enable_cb_prepare)(struct vring_virtqueue *vq);
+	bool (*poll)(const struct vring_virtqueue *vq, u16 last_used_idx);
+	void *(*detach_unused_buf)(struct vring_virtqueue *vq);
+	bool (*more_used)(const struct vring_virtqueue *vq);
+	int (*resize)(struct vring_virtqueue *vq, u32 num);
+	void (*reset)(struct vring_virtqueue *vq);
+};
+
 struct vring_virtqueue {
 	struct virtqueue vq;
 
+	struct virtqueue_ops *ops;
+
 	/* Is this a packed ring? */
 	bool packed_ring;
 
@@ -1116,6 +1137,8 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
 	return 0;
 }
 
+struct virtqueue_ops split_ops;
+
 static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
 						     struct vring_virtqueue_split *vring_split,
 						     struct virtio_device *vdev,
@@ -1134,6 +1157,7 @@ static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
 		return NULL;
 
 	vq->packed_ring = false;
+	vq->ops = &split_ops;
 	vq->vq.callback = callback;
 	vq->vq.vdev = vdev;
 	vq->vq.name = name;
@@ -2076,6 +2100,8 @@ static void virtqueue_reset_packed(struct vring_virtqueue *vq)
 	virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
 }
 
+struct virtqueue_ops packed_ops;
+
 static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
 						      struct vring_virtqueue_packed *vring_packed,
 						      struct virtio_device *vdev,
@@ -2107,6 +2133,7 @@ static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
 	vq->broken = false;
 #endif
 	vq->packed_ring = true;
+	vq->ops = &packed_ops;
 	vq->dma_dev = dma_dev;
 	vq->use_dma_api = vring_use_dma_api(vdev);
 
@@ -2194,6 +2221,34 @@ static int virtqueue_resize_packed(struct vring_virtqueue *vq, u32 num)
 	return -ENOMEM;
 }
 
+struct virtqueue_ops split_ops = {
+	.add = virtqueue_add_split,
+	.get = virtqueue_get_buf_ctx_split,
+	.kick_prepare = virtqueue_kick_prepare_split,
+	.disable_cb = virtqueue_disable_cb_split,
+	.enable_cb_delayed = virtqueue_enable_cb_delayed_split,
+	.enable_cb_prepare = virtqueue_enable_cb_prepare_split,
+	.poll = virtqueue_poll_split,
+	.detach_unused_buf = virtqueue_detach_unused_buf_split,
+	.more_used = more_used_split,
+	.resize = virtqueue_resize_split,
+	.reset = virtqueue_reset_split,
+};
+
+struct virtqueue_ops packed_ops = {
+	.add = virtqueue_add_packed,
+	.get = virtqueue_get_buf_ctx_packed,
+	.kick_prepare = virtqueue_kick_prepare_packed,
+	.disable_cb = virtqueue_disable_cb_packed,
+	.enable_cb_delayed = virtqueue_enable_cb_delayed_packed,
+	.enable_cb_prepare = virtqueue_enable_cb_prepare_packed,
+	.poll = virtqueue_poll_packed,
+	.detach_unused_buf = virtqueue_detach_unused_buf_packed,
+	.more_used = more_used_packed,
+	.resize = virtqueue_resize_packed,
+	.reset = virtqueue_reset_packed,
+};
+
 static int virtqueue_disable_and_recycle(struct virtqueue *_vq,
 					 void (*recycle)(struct virtqueue *vq, void *buf))
 {
@@ -2248,10 +2303,8 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	return vq->packed_ring ? virtqueue_add_packed(vq, sgs, total_sg,
-					out_sgs, in_sgs, data, ctx, premapped, gfp) :
-				 virtqueue_add_split(vq, sgs, total_sg,
-					out_sgs, in_sgs, data, ctx, premapped, gfp);
+	return vq->ops->add(vq, sgs, total_sg,
+			    out_sgs, in_sgs, data, ctx, premapped, gfp);
 }
 
 /**
@@ -2437,8 +2490,7 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	return vq->packed_ring ? virtqueue_kick_prepare_packed(vq) :
-				 virtqueue_kick_prepare_split(vq);
+	return vq->ops->kick_prepare(vq);
 }
 EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
 
@@ -2508,8 +2560,7 @@ void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(vq, len, ctx) :
-				 virtqueue_get_buf_ctx_split(vq, len, ctx);
+	return vq->ops->get(vq, len, ctx);
 }
 EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
 
@@ -2531,10 +2582,7 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	if (vq->packed_ring)
-		virtqueue_disable_cb_packed(vq);
-	else
-		virtqueue_disable_cb_split(vq);
+	return vq->ops->disable_cb(vq);
 }
 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
@@ -2557,8 +2605,7 @@ unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 	if (vq->event_triggered)
 		vq->event_triggered = false;
 
-	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(vq) :
-				 virtqueue_enable_cb_prepare_split(vq);
+	return vq->ops->enable_cb_prepare(vq);
 }
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
 
@@ -2579,8 +2626,7 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
 		return false;
 
 	virtio_mb(vq->weak_barriers);
-	return vq->packed_ring ? virtqueue_poll_packed(vq, last_used_idx) :
-				 virtqueue_poll_split(vq, last_used_idx);
+	return vq->ops->poll(vq, last_used_idx);
 }
 EXPORT_SYMBOL_GPL(virtqueue_poll);
 
@@ -2623,8 +2669,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	if (vq->event_triggered)
 		vq->event_triggered = false;
 
-	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(vq) :
-				 virtqueue_enable_cb_delayed_split(vq);
+	return vq->ops->enable_cb_delayed(vq);
 }
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
 
@@ -2640,14 +2685,13 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(vq) :
-				 virtqueue_detach_unused_buf_split(vq);
+	return vq->ops->detach_unused_buf(vq);
 }
 EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
 
 static inline bool more_used(const struct vring_virtqueue *vq)
 {
-	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
+	return vq->ops->more_used(vq);
 }
 
 /**
@@ -2785,10 +2829,7 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
 	if (recycle_done)
 		recycle_done(_vq);
 
-	if (vq->packed_ring)
-		err = virtqueue_resize_packed(vq, num);
-	else
-		err = virtqueue_resize_split(vq, num);
+	err = vq->ops->resize(vq, num);
 
 	return virtqueue_enable_after_reset(_vq);
 }
@@ -2822,10 +2863,7 @@ int virtqueue_reset(struct virtqueue *_vq,
 	if (recycle_done)
 		recycle_done(_vq);
 
-	if (vq->packed_ring)
-		virtqueue_reset_packed(vq);
-	else
-		virtqueue_reset_split(vq);
+	vq->ops->reset(vq);
 
 	return virtqueue_enable_after_reset(_vq);
 }
-- 
2.42.0