On Fri, Dec 14, 2018 at 09:35:11AM -0800, Bart Van Assche wrote:
> Commit b5b6e8c8d3b4 ("scsi: virtio_scsi: fix IO hang caused by automatic
> irq vector affinity") removed all virtio_scsi hostdata users. Since the
> SCSI host data is no longer used, also remove the host data itself.
> 
> Cc: Paolo Bonzini <pbonz...@redhat.com>
> Cc: Christoph Hellwig <h...@lst.de>
> Cc: Ming Lei <ming....@redhat.com>
> Cc: Hannes Reinecke <h...@suse.de>
> Signed-off-by: Bart Van Assche <bvanass...@acm.org>
> ---
>  drivers/scsi/virtio_scsi.c | 52 --------------------------------------
>  1 file changed, 52 deletions(-)
> 
> diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
> index 1c72db94270e..198af631244c 100644
> --- a/drivers/scsi/virtio_scsi.c
> +++ b/drivers/scsi/virtio_scsi.c
> @@ -68,33 +68,6 @@ struct virtio_scsi_vq {
>       struct virtqueue *vq;
>  };
>  
> -/*
> - * Per-target queue state.
> - *
> - * This struct holds the data needed by the queue steering policy.  When a
> - * target is sent multiple requests, we need to drive them to the same queue so
> - * that FIFO processing order is kept.  However, if a target was idle, we can
> - * choose a queue arbitrarily.  In this case the queue is chosen according to
> - * the current VCPU, so the driver expects the number of request queues to be
> - * equal to the number of VCPUs.  This makes it easy and fast to select the
> - * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
> - * (each virtqueue's affinity is set to the CPU that "owns" the queue).
> - *
> - * tgt_seq is held to serialize reading and writing req_vq.
> - *
> - * Decrements of reqs are never concurrent with writes of req_vq: before the
> - * decrement reqs will be != 0; after the decrement the virtqueue completion
> - * routine will not use the req_vq so it can be changed by a new request.
> - * Thus they can happen outside the tgt_seq, provided of course we make reqs
> - * an atomic_t.
> - */
> -struct virtio_scsi_target_state {
> -     seqcount_t tgt_seq;
> -
> -     /* Currently active virtqueue for requests sent to this target. */
> -     struct virtio_scsi_vq *req_vq;
> -};
> -
>  /* Driver instance state */
>  struct virtio_scsi {
>       struct virtio_device *vdev;
> @@ -693,29 +666,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
>       return virtscsi_tmf(vscsi, cmd);
>  }
>  
> -static int virtscsi_target_alloc(struct scsi_target *starget)
> -{
> -     struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
> -     struct virtio_scsi *vscsi = shost_priv(sh);
> -
> -     struct virtio_scsi_target_state *tgt =
> -                             kmalloc(sizeof(*tgt), GFP_KERNEL);
> -     if (!tgt)
> -             return -ENOMEM;
> -
> -     seqcount_init(&tgt->tgt_seq);
> -     tgt->req_vq = &vscsi->req_vqs[0];
> -
> -     starget->hostdata = tgt;
> -     return 0;
> -}
> -
> -static void virtscsi_target_destroy(struct scsi_target *starget)
> -{
> -     struct virtio_scsi_target_state *tgt = starget->hostdata;
> -     kfree(tgt);
> -}
> -
>  static int virtscsi_map_queues(struct Scsi_Host *shost)
>  {
>       struct virtio_scsi *vscsi = shost_priv(shost);
> @@ -748,8 +698,6 @@ static struct scsi_host_template virtscsi_host_template = {
>  
>       .dma_boundary = UINT_MAX,
>       .use_clustering = ENABLE_CLUSTERING,
> -     .target_alloc = virtscsi_target_alloc,
> -     .target_destroy = virtscsi_target_destroy,
>       .map_queues = virtscsi_map_queues,
>       .track_queue_depth = 1,
>       .force_blk_mq = 1,
> -- 
> 2.20.0.405.gbc1bbc6f85-goog
> 

Reviewed-by: Ming Lei <ming....@redhat.com>

Thanks,
Ming

Reply via email to