This patch unifies the logic of virtqueue_poll() and more_used() for
better code reuse and to ease the future in-order implementation.

Signed-off-by: Jason Wang <jasow...@redhat.com>
---
 drivers/virtio/virtio_ring.c | 48 +++++++++++++++---------------------
 1 file changed, 20 insertions(+), 28 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 1c6b63812bf8..9172f3a089a0 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -802,12 +802,18 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
        }
 }
 
-static bool more_used_split(const struct vring_virtqueue *vq)
+static bool virtqueue_poll_split(const struct vring_virtqueue *vq,
+                                unsigned int last_used_idx)
 {
-       return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
+       return (u16)last_used_idx != virtio16_to_cpu(vq->vq.vdev,
                        vq->split.vring.used->idx);
 }
 
+static bool more_used_split(const struct vring_virtqueue *vq)
+{
+       return virtqueue_poll_split(vq, vq->last_used_idx);
+}
+
 static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
                                         unsigned int *len,
                                         void **ctx)
@@ -915,13 +921,6 @@ static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
        return last_used_idx;
 }
 
-static bool virtqueue_poll_split(struct vring_virtqueue *vq,
-                                unsigned int last_used_idx)
-{
-       return (u16)last_used_idx != virtio16_to_cpu(vq->vq.vdev,
-                       vq->split.vring.used->idx);
-}
-
 static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
@@ -1711,16 +1710,20 @@ static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
        return avail == used && used == used_wrap_counter;
 }
 
-static bool more_used_packed(const struct vring_virtqueue *vq)
+static bool virtqueue_poll_packed(const struct vring_virtqueue *vq, u16 off_wrap)
 {
-       u16 last_used;
-       u16 last_used_idx;
-       bool used_wrap_counter;
+       bool wrap_counter;
+       u16 used_idx;
 
-       last_used_idx = READ_ONCE(vq->last_used_idx);
-       last_used = packed_last_used(last_used_idx);
-       used_wrap_counter = packed_used_wrap_counter(last_used_idx);
-       return is_used_desc_packed(vq, last_used, used_wrap_counter);
+       wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
+       used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+
+       return is_used_desc_packed(vq, used_idx, wrap_counter);
+}
+
+static bool more_used_packed(const struct vring_virtqueue *vq)
+{
+       return virtqueue_poll_packed(vq, READ_ONCE(vq->last_used_idx));
 }
 
 static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
@@ -1844,17 +1847,6 @@ static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
        return vq->last_used_idx;
 }
 
-static bool virtqueue_poll_packed(struct vring_virtqueue *vq, u16 off_wrap)
-{
-       bool wrap_counter;
-       u16 used_idx;
-
-       wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
-       used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
-
-       return is_used_desc_packed(vq, used_idx, wrap_counter);
-}
-
 static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
-- 
2.42.0


Reply via email to