Implement the insertion of available buffers in the descriptor area of
packed shadow virtqueues. Descriptor chains are taken into account;
indirect descriptors are not supported yet.

Enable the packed SVQ to forward descriptors to the device.

Signed-off-by: Sahil Siddiq <sahil...@proton.me>
---
Changes from v4 -> v5:
- This was commit #2 in v4. It has been reordered to commit #3
  based on review comments.
- vhost-shadow-virtqueue.c:
  (vhost_svq_valid_features): Move addition of enums to commit #6
  based on review comments.
  (vhost_svq_add_packed): Set head_idx to buffer id instead of vring's
  index.
  (vhost_svq_kick): Split into vhost_svq_kick_split and
  vhost_svq_kick_packed.
  (vhost_svq_add): Use new vhost_svq_kick_* functions.
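
For reviewers less familiar with the packed layout, here is a minimal
standalone sketch (simplified, not code from this patch) of how the
avail/used flag pair relates to the driver's wrap counter in the
VIRTIO 1.1 packed virtqueue format. The XOR in vhost_svq_add_packed()
flips both bits whenever the ring wraps, which is equivalent to
recomputing the pair from the toggled wrap counter:

    #include <stdbool.h>
    #include <stdint.h>

    #define VRING_PACKED_DESC_F_AVAIL 7
    #define VRING_PACKED_DESC_F_USED  15

    /*
     * A descriptor is available to the device when its AVAIL bit
     * equals the driver's wrap counter and its USED bit is the
     * inverse of it.
     */
    static uint16_t avail_used_flags(bool avail_wrap_counter)
    {
        return avail_wrap_counter ? 1 << VRING_PACKED_DESC_F_AVAIL
                                  : 1 << VRING_PACKED_DESC_F_USED;
    }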

 hw/virtio/vhost-shadow-virtqueue.c | 117 +++++++++++++++++++++++++++--
 1 file changed, 112 insertions(+), 5 deletions(-)
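
A side note on the notification path: without VIRTIO_RING_F_EVENT_IDX,
the device publishes a single enable/disable flag in its event
suppression area, so the kick decision in vhost_svq_kick_packed()
reduces to the check below. This is illustrative only, using the struct
and constants from the Linux uapi <linux/virtio_ring.h>:

    #include <stdbool.h>
    #include <stdint.h>

    #define VRING_PACKED_EVENT_FLAG_ENABLE  0x0
    #define VRING_PACKED_EVENT_FLAG_DISABLE 0x1

    struct vring_packed_desc_event {
        uint16_t off_wrap; /* descriptor event offset/wrap counter */
        uint16_t flags;    /* descriptor event flags */
    };

    /* Kick the device unless it has notifications disabled. */
    static bool needs_kick_packed(const struct vring_packed_desc_event *dev)
    {
        return dev->flags != VRING_PACKED_EVENT_FLAG_DISABLE;
    }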

diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 4f74ad402a..6e16cd4bdf 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -193,10 +193,83 @@ static void vhost_svq_add_split(VhostShadowVirtqueue *svq,
     /* Update the avail index after write the descriptor */
     smp_wmb();
     avail->idx = cpu_to_le16(svq->shadow_avail_idx);
+}
+
+/**
+ * Write descriptors to SVQ packed vring
+ *
+ * @svq: The shadow virtqueue
+ * @out_sg: The iovec from the guest (device-readable buffers)
+ * @out_num: Outgoing iovec length
+ * @in_sg: The iovec to the guest (device-writable buffers)
+ * @in_num: Incoming iovec length
+ * @sgs: Array of translated addresses (hwaddr) of the buffers
+ * @head: Output argument; set to the chain's buffer id (current free_head)
+ */
+static void vhost_svq_add_packed(VhostShadowVirtqueue *svq,
+                                 const struct iovec *out_sg, size_t out_num,
+                                 const struct iovec *in_sg, size_t in_num,
+                                 hwaddr *sgs, unsigned *head)
+{
+    uint16_t id, curr, i, head_flags = 0, head_idx;
+    size_t num = out_num + in_num;
+    unsigned n;
+
+    struct vring_packed_desc *descs = svq->vring_packed.vring.desc;
+
+    head_idx = svq->vring_packed.next_avail_idx;
+    i = head_idx;
+    id = svq->free_head;
+    curr = id;
+    *head = id;
+
+    /* Write descriptors to SVQ packed vring */
+    for (n = 0; n < num; n++) {
+        uint16_t flags = cpu_to_le16(svq->vring_packed.avail_used_flags |
+                                     (n < out_num ? 0 : VRING_DESC_F_WRITE) |
+                                     (n + 1 == num ? 0 : VRING_DESC_F_NEXT));
+        if (i == head_idx) {
+            head_flags = flags;
+        } else {
+            descs[i].flags = flags;
+        }
+
+        descs[i].addr = cpu_to_le64(sgs[n]);
+        descs[i].id = cpu_to_le16(id);
+        if (n < out_num) {
+            descs[i].len = cpu_to_le32(out_sg[n].iov_len);
+        } else {
+            descs[i].len = cpu_to_le32(in_sg[n - out_num].iov_len);
+        }
+
+        curr = cpu_to_le16(svq->desc_next[curr]);
+
+        if (++i >= svq->vring_packed.vring.num) {
+            i = 0;
+            svq->vring_packed.avail_used_flags ^=
+                1 << VRING_PACKED_DESC_F_AVAIL |
+                1 << VRING_PACKED_DESC_F_USED;
+        }
+    }
 
+    if (i <= head_idx) {
+        svq->vring_packed.avail_wrap_counter ^= 1;
+    }
+
+    svq->vring_packed.next_avail_idx = i;
+    svq->shadow_avail_idx = i;
+    svq->free_head = curr;
+
+    /*
+     * A driver MUST NOT make the first descriptor in the list
+     * available before all subsequent descriptors comprising
+     * the list are made available.
+     */
+    smp_wmb();
+    svq->vring_packed.vring.desc[head_idx].flags = head_flags;
 }
 
-static void vhost_svq_kick(VhostShadowVirtqueue *svq)
+static void vhost_svq_kick_split(VhostShadowVirtqueue *svq)
 {
     bool needs_kick;
 
@@ -209,7 +282,8 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
     if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
         uint16_t avail_event = le16_to_cpu(
                 *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]));
-        needs_kick = vring_need_event(avail_event, svq->shadow_avail_idx, svq->shadow_avail_idx - 1);
+        needs_kick = vring_need_event(avail_event, svq->shadow_avail_idx,
+                     svq->shadow_avail_idx - 1);
     } else {
         needs_kick =
                !(svq->vring.used->flags & cpu_to_le16(VRING_USED_F_NO_NOTIFY));
@@ -222,6 +296,30 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
     event_notifier_set(&svq->hdev_kick);
 }
 
+static void vhost_svq_kick_packed(VhostShadowVirtqueue *svq)
+{
+    bool needs_kick;
+
+    /*
+     * We need to expose the available array entries before checking
+     * notification suppressions.
+     */
+    smp_mb();
+
+    if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
+        /* Event idx based notification suppression is not handled yet */
+        return;
+    }
+    needs_kick = (svq->vring_packed.vring.device->flags !=
+                  cpu_to_le16(VRING_PACKED_EVENT_FLAG_DISABLE));
+
+    if (!needs_kick) {
+        return;
+    }
+
+    event_notifier_set(&svq->hdev_kick);
+}
+
 /**
  * Add an element to a SVQ.
  *
@@ -258,13 +356,22 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
         return -EINVAL;
     }
 
-    vhost_svq_add_split(svq, out_sg, out_num, in_sg,
-                        in_num, sgs, &qemu_head);
+    if (svq->is_packed) {
+        vhost_svq_add_packed(svq, out_sg, out_num, in_sg,
+                             in_num, sgs, &qemu_head);
+    } else {
+        vhost_svq_add_split(svq, out_sg, out_num, in_sg,
+                            in_num, sgs, &qemu_head);
+    }
 
     svq->num_free -= ndescs;
     svq->desc_state[qemu_head].elem = elem;
     svq->desc_state[qemu_head].ndescs = ndescs;
-    vhost_svq_kick(svq);
+    if (svq->is_packed) {
+        vhost_svq_kick_packed(svq);
+    } else {
+        vhost_svq_kick_split(svq);
+    }
     return 0;
 }
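
As a final illustrative note on the ordering above: vhost_svq_add_packed()
withholds the head descriptor's flags until the rest of the chain is
written, and stores them only after smp_wmb(). A minimal sketch of that
pattern (hypothetical publish_chain() helper with simplified types,
ignoring ring wrap-around; not code from this patch):

    #include <stdint.h>

    struct desc { uint64_t addr; uint32_t len; uint16_t id; uint16_t flags; };
    #define smp_wmb() __atomic_thread_fence(__ATOMIC_RELEASE)

    /*
     * Write everything except the head's flags, then barrier, then the
     * head's flags. The device polls the head's AVAIL/USED bits, so it
     * can never observe a partially written chain.
     */
    static void publish_chain(struct desc *ring, uint16_t head, uint16_t n,
                              const struct desc *chain, uint16_t head_flags)
    {
        for (uint16_t k = 1; k < n; k++) {
            ring[head + k] = chain[k]; /* flags included for non-head descs */
        }
        ring[head].addr = chain[0].addr;
        ring[head].len  = chain[0].len;
        ring[head].id   = chain[0].id;
        smp_wmb(); /* body must be visible before the head flags */
        ring[head].flags = head_flags;
    }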
 
-- 
2.48.1

