Log write descriptors for the control queue, leveraging
vhost_scsi_get_desc() and vhost_get_vq_desc() to retrieve the array of
write descriptors and obtain the log buffer.

For Task Management Requests, as with the I/O queue, store the log
buffer during the submission path and log it in the completion or
error-handling path.

For Asynchronous Notifications, only the submission path is involved.
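
To illustrate the idea independent of the kernel sources, below is a
minimal, simplified userspace C sketch of the capture-and-replay pattern
used here: the submission path copies the dirty write-descriptor log
entries into per-request state, and the completion path replays them.
All names in the sketch (demo_log_entry, demo_request, demo_capture_log,
demo_replay_log) are hypothetical and only mirror the shape of
tmf->tmf_log / tmf->tmf_log_num and vhost_scsi_log_write(); they are not
part of the vhost code.

    /* Hypothetical sketch of capture-and-replay; not vhost code. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct demo_log_entry {         /* stands in for struct vhost_log */
            unsigned long long addr;
            unsigned long long len;
    };

    struct demo_request {           /* stands in for struct vhost_scsi_tmf */
            struct demo_log_entry *log;
            unsigned int log_num;
    };

    /* Submission path: take a private copy of the dirty write descriptors. */
    static int demo_capture_log(struct demo_request *req,
                                const struct demo_log_entry *log,
                                unsigned int log_num)
    {
            if (!log || !log_num)
                    return 0;

            req->log = malloc(log_num * sizeof(*req->log));
            if (!req->log)
                    return -1;

            memcpy(req->log, log, log_num * sizeof(*req->log));
            req->log_num = log_num;
            return 0;
    }

    /* Completion path: replay (here: print) the captured entries, then free. */
    static void demo_replay_log(struct demo_request *req)
    {
            unsigned int i;

            for (i = 0; i < req->log_num; i++)
                    printf("dirty region: addr=0x%llx len=%llu\n",
                           req->log[i].addr, req->log[i].len);

            free(req->log);
            req->log = NULL;
            req->log_num = 0;
    }

    int main(void)
    {
            struct demo_log_entry vq_log[2] = {
                    { .addr = 0x1000, .len = 16 },
                    { .addr = 0x2000, .len = 32 },
            };
            struct demo_request req = { 0 };

            if (demo_capture_log(&req, vq_log, 2) == 0)    /* submission */
                    demo_replay_log(&req);                 /* completion */
            return 0;
    }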

Suggested-by: Joao Martins <joao.m.mart...@oracle.com>
Signed-off-by: Dongli Zhang <dongli.zh...@oracle.com>
---
 drivers/vhost/scsi.c | 51 +++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 46 insertions(+), 5 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index d678eaf4ca68..21c2d07b806a 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -225,6 +225,12 @@ struct vhost_scsi_tmf {
        struct iovec resp_iov;
        int in_iovs;
        int vq_desc;
+
+       /*
+        * Dirty write descriptors of this command.
+        */
+       struct vhost_log *tmf_log;
+       unsigned int tmf_log_num;
 };
 
 /*
@@ -378,6 +384,11 @@ static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
 {
        struct vhost_scsi_inflight *inflight = tmf->inflight;
 
+       if (tmf->tmf_log_num) {
+               kfree(tmf->tmf_log);
+               tmf->tmf_log_num = 0;
+       }
+
        kfree(tmf);
        vhost_scsi_put_inflight(inflight);
 }
@@ -1348,6 +1359,14 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
 
        vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
                                 tmf->vq_desc, &tmf->resp_iov, resp_code);
+
+       if (unlikely(tmf->tmf_log_num)) {
+               mutex_lock(&tmf->svq->vq.mutex);
+               vhost_scsi_log_write(&tmf->svq->vq, tmf->tmf_log,
+                                    tmf->tmf_log_num);
+               mutex_unlock(&tmf->svq->vq.mutex);
+       }
+
        vhost_scsi_release_tmf_res(tmf);
 }
 
@@ -1369,7 +1388,8 @@ static void
 vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
                      struct vhost_virtqueue *vq,
                      struct virtio_scsi_ctrl_tmf_req *vtmf,
-                     struct vhost_scsi_ctx *vc)
+                     struct vhost_scsi_ctx *vc,
+                     struct vhost_log *log, unsigned int log_num)
 {
        struct vhost_scsi_virtqueue *svq = container_of(vq,
                                        struct vhost_scsi_virtqueue, vq);
@@ -1397,6 +1417,16 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
        tmf->in_iovs = vc->in;
        tmf->inflight = vhost_scsi_get_inflight(vq);
 
+       if (unlikely(log && log_num)) {
+               tmf->tmf_log = kmalloc_array(log_num, sizeof(*tmf->tmf_log),
+                                            GFP_KERNEL);
+               if (tmf->tmf_log) {
+                       memcpy(tmf->tmf_log, log, sizeof(*tmf->tmf_log) * log_num);
+                       tmf->tmf_log_num = log_num;
+               } else
+                       pr_err("vhost_scsi tmf log allocation error\n");
+       }
+
        if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
                              vhost_buf_to_lun(vtmf->lun), NULL,
                              TMR_LUN_RESET, GFP_KERNEL, 0,
@@ -1410,6 +1440,7 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
 send_reject:
        vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
                                 VIRTIO_SCSI_S_FUNCTION_REJECTED);
+       vhost_scsi_log_write(vq, log, log_num);
 }
 
 static void
@@ -1446,6 +1477,8 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
        struct vhost_scsi_ctx vc;
        size_t typ_size;
        int ret, c = 0;
+       struct vhost_log *vq_log;
+       unsigned int log_num;
 
        mutex_lock(&vq->mutex);
        /*
@@ -1459,8 +1492,11 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
        vhost_disable_notify(&vs->dev, vq);
 
+       vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
+               vq->log : NULL;
+
        do {
-               ret = vhost_scsi_get_desc(vs, vq, &vc, NULL, NULL);
+               ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
                if (ret)
                        goto err;
 
@@ -1524,9 +1560,12 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        goto err;
 
                if (v_req.type == VIRTIO_SCSI_T_TMF)
-                       vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
-               else
+                       vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc,
+                                             vq_log, log_num);
+               else {
                        vhost_scsi_send_an_resp(vs, vq, &vc);
+                       vhost_scsi_log_write(vq, vq_log, log_num);
+               }
 err:
                /*
                 * ENXIO:  No more requests, or read error, wait for next kick
@@ -1536,8 +1575,10 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                 */
                if (ret == -ENXIO)
                        break;
-               else if (ret == -EIO)
+               else if (ret == -EIO) {
                        vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+                       vhost_scsi_log_write(vq, vq_log, log_num);
+               }
        } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
 out:
        mutex_unlock(&vq->mutex);
-- 
2.39.3

