Reviewed-by: Jacek Lawrynowicz <jacek.lawrynow...@linux.intel.com>

On 08.12.2023 17:34, Jeffrey Hugo wrote:
> From: Pranjal Ramajor Asha Kanojiya <quic_pkano...@quicinc.com>
> 
> The ->queued field is used to track whether the BO is submitted to hardware for
> DMA or not. Since the same information can be retrieved using the ->xfer_list
> field of the same structure, remove ->queued as it is redundant.
> 
> Signed-off-by: Pranjal Ramajor Asha Kanojiya <quic_pkano...@quicinc.com>
> Reviewed-by: Jeffrey Hugo <quic_jh...@quicinc.com>
> Signed-off-by: Jeffrey Hugo <quic_jh...@quicinc.com>
> ---
>  drivers/accel/qaic/qaic.h      |  2 --
>  drivers/accel/qaic/qaic_data.c | 23 +++++++++++------------
>  2 files changed, 11 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
> index 582836f9538f..2b3ef588b717 100644
> --- a/drivers/accel/qaic/qaic.h
> +++ b/drivers/accel/qaic/qaic.h
> @@ -191,8 +191,6 @@ struct qaic_bo {
>       u32                     nr_slice;
>       /* Number of slice that have been transferred by DMA engine */
>       u32                     nr_slice_xfer_done;
> -     /* true = BO is queued for execution, true = BO is not queued */
> -     bool                    queued;
>       /*
>        * If true then user has attached slicing information to this BO by
>        * calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
> diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
> index 0c6f1328df68..89ab8fa19315 100644
> --- a/drivers/accel/qaic/qaic_data.c
> +++ b/drivers/accel/qaic/qaic_data.c
> @@ -141,6 +141,11 @@ struct dbc_rsp {
>       __le16  status;
>  } __packed;
>  
> +static inline bool bo_queued(struct qaic_bo *bo)
> +{
> +     return !list_empty(&bo->xfer_list);
> +}
> +
>  inline int get_dbc_req_elem_size(void)
>  {
>       return sizeof(struct dbc_req);
> @@ -648,6 +653,7 @@ static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
>       }
>       complete_all(&bo->xfer_done);
>       INIT_LIST_HEAD(&bo->slices);
> +     INIT_LIST_HEAD(&bo->xfer_list);
>  }
>  
>  static struct qaic_bo *qaic_alloc_init_bo(void)
> @@ -1166,7 +1172,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
>       struct bo_slice *slice;
>       unsigned long flags;
>       struct qaic_bo *bo;
> -     bool queued;
>       int i, j;
>       int ret;
>  
> @@ -1198,9 +1203,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
>               }
>  
>               spin_lock_irqsave(&dbc->xfer_lock, flags);
> -             queued = bo->queued;
> -             bo->queued = true;
> -             if (queued) {
> +             if (bo_queued(bo)) {
>                       spin_unlock_irqrestore(&dbc->xfer_lock, flags);
>                       ret = -EINVAL;
>                       goto unlock_bo;
> @@ -1223,7 +1226,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
>                       else
>                               ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
>                       if (ret) {
> -                             bo->queued = false;
>                               spin_unlock_irqrestore(&dbc->xfer_lock, flags);
>                               goto unlock_bo;
>                       }
> @@ -1246,8 +1248,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
>               spin_lock_irqsave(&dbc->xfer_lock, flags);
>               bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
>               obj = &bo->base;
> -             bo->queued = false;
> -             list_del(&bo->xfer_list);
> +             list_del_init(&bo->xfer_list);
>               spin_unlock_irqrestore(&dbc->xfer_lock, flags);
>               dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
>               drm_gem_object_put(obj);
> @@ -1608,8 +1609,7 @@ irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
>                        */
>                       dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
>                       bo->nr_slice_xfer_done = 0;
> -                     bo->queued = false;
> -                     list_del(&bo->xfer_list);
> +                     list_del_init(&bo->xfer_list);
>                       bo->perf_stats.req_processed_ts = ktime_get_ns();
>                       complete_all(&bo->xfer_done);
>                       drm_gem_object_put(&bo->base);
> @@ -1868,7 +1868,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
>  
>       /* Check if BO is committed to H/W for DMA */
>       spin_lock_irqsave(&dbc->xfer_lock, flags);
> -     if (bo->queued) {
> +     if (bo_queued(bo)) {
>               spin_unlock_irqrestore(&dbc->xfer_lock, flags);
>               ret = -EBUSY;
>               goto unlock_ch_srcu;
> @@ -1898,8 +1898,7 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
>       spin_lock_irqsave(&dbc->xfer_lock, flags);
>       while (!list_empty(&dbc->xfer_list)) {
>               bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
> -             bo->queued = false;
> -             list_del(&bo->xfer_list);
> +             list_del_init(&bo->xfer_list);
>               spin_unlock_irqrestore(&dbc->xfer_lock, flags);
>               bo->nr_slice_xfer_done = 0;
>               bo->req_id = 0;

Reply via email to