The virtio core currently accepts only virtual addresses; the DMA
mapping is done inside the virtio core itself.

In some scenarios (such as AF_XDP), the memory is allocated and the DMA
mapping is completed in advance, so the virtio core needs to support
receiving a premapped DMA address from the driver.

Drivers can use sg->dma_address to pass an already-mapped DMA address
to the virtio core. If sg->dma_address is used for one sg, it must be
used for all sgs of the request; otherwise, every dma_address must be
NULL.
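
For illustration, a driver-side caller might look roughly like the
sketch below. This is not part of the patch: "dev", "buf" and "len"
are placeholders, and dma_map_single() stands in for whatever
premapping the driver (e.g. AF_XDP) already performs:

    /* Sketch only: premap a buffer, then hand the DMA address to
     * the virtio core via sg->dma_address.
     */
    dma_addr_t dma_addr;
    struct scatterlist sg;

    dma_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, dma_addr))
            return -ENOMEM;

    sg_init_one(&sg, buf, len);
    sg.dma_address = dma_addr; /* non-NULL: virtio core skips mapping */

    /* All sgs of one request must be premapped, or none of them. */
    err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);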

On the indirect path, if dma_address is used, the
VRING_INDIRECT_NO_DMA_MAP flag is mixed into desc_state.indir_desc.
When unmapping, we check that flag and skip the unmap for buffers that
the driver mapped itself.
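
The desc_mix_dma_map()/desc_rm_dma_map()/desc_map_inter() helpers used
below are introduced earlier in this series; their bodies are not shown
in this patch. Conceptually this is plain pointer tagging in the low
bit of indir_desc. A minimal sketch, assuming the flag lives in bit 0
and the indirect table is at least 2-byte aligned (the actual flag
value comes from the earlier patch):

    #define VRING_INDIRECT_NO_DMA_MAP (1UL << 0)

    /* Tag the pointer when virtio core did NOT do the mapping. */
    static void *desc_mix_dma_map(bool do_map, void *desc)
    {
            if (do_map)
                    return desc;

            return (void *)((unsigned long)desc | VRING_INDIRECT_NO_DMA_MAP);
    }

    /* Strip the flag to recover the real descriptor pointer. */
    static void *desc_rm_dma_map(void *mix)
    {
            return (void *)((unsigned long)mix & ~VRING_INDIRECT_NO_DMA_MAP);
    }

    /* True if virtio core did the mapping internally, so the
     * unmap must be done here as well.
     */
    static bool desc_map_inter(void *mix)
    {
            return !((unsigned long)mix & VRING_INDIRECT_NO_DMA_MAP);
    }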

Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
---
 drivers/virtio/virtio_ring.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 11827d2e56a8..b23d301effb5 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1331,6 +1331,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
        unsigned int i, n;
        u16 head, id;
        dma_addr_t addr;
+       bool do_map;
 
        head = vq->packed.next_avail_idx;
        desc = alloc_indirect_packed(total_sg, gfp);
@@ -1348,7 +1349,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
        id = vq->free_head;
        BUG_ON(id == vq->packed.vring.num);
 
-       if (virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs))
+       do_map = !sgs[0]->dma_address;
+       if (do_map && virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs))
                return -ENOMEM;
 
        for (n = 0; n < out_sgs + in_sgs; n++) {
@@ -1408,7 +1410,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
        /* Store token and indirect buffer state. */
        vq->packed.desc_state[id].num = 1;
        vq->packed.desc_state[id].data = data;
-       vq->packed.desc_state[id].indir_desc = desc;
+       vq->packed.desc_state[id].indir_desc = desc_mix_dma_map(do_map, desc);
        vq->packed.desc_state[id].last = id;
 
        vq->num_added += 1;
@@ -1419,7 +1421,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
        return 0;
 
 unmap_release:
-       virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
+       if (do_map)
+               virtqueue_unmap_sgs(vq, sgs, total_sg, out_sgs, in_sgs);
 
        kfree(desc);
 
@@ -1633,14 +1636,17 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
        }
 
        if (vq->indirect) {
+               struct vring_packed_desc *mix;
                u32 len;
 
                /* Free the indirect table, if any, now that it's unmapped. */
-               desc = state->indir_desc;
-               if (!desc)
+               mix = state->indir_desc;
+               if (!mix)
                        return;
 
-               if (vq->use_dma_api) {
+               desc = desc_rm_dma_map(mix);
+
+               if (vq->use_dma_api && desc_map_inter(mix)) {
                        len = vq->packed.desc_extra[id].len;
                        for (i = 0; i < len / sizeof(struct vring_packed_desc);
                                        i++)
-- 
2.32.0.3.g01195cf9f
