To make the code more readable, introduce vring_need_unmap_buffer() to
replace do_unmap.

   use_dma_api premapped -> vring_need_unmap_buffer()
1. false       false        false
2. true        false        true
3. true        true         false

Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
Acked-by: Jason Wang <jasow...@redhat.com>
---
 drivers/virtio/virtio_ring.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 94c442ba844f..c2779e34aac7 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -175,11 +175,6 @@ struct vring_virtqueue {
        /* Do DMA mapping by driver */
        bool premapped;
 
-       /* Do unmap or not for desc. Just when premapped is False and
-        * use_dma_api is true, this is true.
-        */
-       bool do_unmap;
-
        /* Head of free buffer list. */
        unsigned int free_head;
        /* Number we've added since last sync. */
@@ -295,6 +290,11 @@ static bool vring_use_dma_api(const struct virtio_device *vdev)
        return false;
 }
 
+static bool vring_need_unmap_buffer(const struct vring_virtqueue *vring)
+{
+       return vring->use_dma_api && !vring->premapped;
+}
+
 size_t virtio_max_dma_size(const struct virtio_device *vdev)
 {
        size_t max_segment_size = SIZE_MAX;
@@ -443,7 +443,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
 {
        u16 flags;
 
-       if (!vq->do_unmap)
+       if (!vring_need_unmap_buffer(vq))
                return;
 
        flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
@@ -473,7 +473,7 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
                                 (flags & VRING_DESC_F_WRITE) ?
                                 DMA_FROM_DEVICE : DMA_TO_DEVICE);
        } else {
-               if (!vq->do_unmap)
+               if (!vring_need_unmap_buffer(vq))
                        goto out;
 
                dma_unmap_page(vring_dma_dev(vq),
@@ -641,7 +641,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
        }
        /* Last one doesn't continue. */
        desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
-       if (!indirect && vq->do_unmap)
+       if (!indirect && vring_need_unmap_buffer(vq))
                vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
                        ~VRING_DESC_F_NEXT;
 
@@ -800,7 +800,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
                                VRING_DESC_F_INDIRECT));
                BUG_ON(len == 0 || len % sizeof(struct vring_desc));
 
-               if (vq->do_unmap) {
+               if (vring_need_unmap_buffer(vq)) {
                        for (j = 0; j < len / sizeof(struct vring_desc); j++)
                                vring_unmap_one_split_indirect(vq, &indir_desc[j]);
                }
@@ -1230,7 +1230,7 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
                                 (flags & VRING_DESC_F_WRITE) ?
                                 DMA_FROM_DEVICE : DMA_TO_DEVICE);
        } else {
-               if (!vq->do_unmap)
+               if (!vring_need_unmap_buffer(vq))
                        return;
 
                dma_unmap_page(vring_dma_dev(vq),
@@ -1245,7 +1245,7 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
 {
        u16 flags;
 
-       if (!vq->do_unmap)
+       if (!vring_need_unmap_buffer(vq))
                return;
 
        flags = le16_to_cpu(desc->flags);
@@ -1626,7 +1626,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
                if (!desc)
                        return;
 
-               if (vq->do_unmap) {
+               if (vring_need_unmap_buffer(vq)) {
                        len = vq->packed.desc_extra[id].len;
                        for (i = 0; i < len / sizeof(struct vring_packed_desc);
                                        i++)
@@ -2080,7 +2080,6 @@ static struct virtqueue *vring_create_virtqueue_packed(struct virtio_device *vde
        vq->dma_dev = dma_dev;
        vq->use_dma_api = vring_use_dma_api(vdev);
        vq->premapped = false;
-       vq->do_unmap = vq->use_dma_api;
 
        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
                !cfg_vq_get(cfg, ctx);
@@ -2621,7 +2620,6 @@ static struct virtqueue *__vring_new_virtqueue(struct virtio_device *vdev,
        vq->dma_dev = tp_cfg->dma_dev;
        vq->use_dma_api = vring_use_dma_api(vdev);
        vq->premapped = false;
-       vq->do_unmap = vq->use_dma_api;
 
        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
                !cfg_vq_get(cfg, ctx);
@@ -2752,7 +2750,6 @@ int virtqueue_set_dma_premapped(struct virtqueue *_vq)
        }
 
        vq->premapped = true;
-       vq->do_unmap = false;
 
        END_USE(vq);
 
-- 
2.32.0.3.g01195cf9f


Reply via email to