On Fri, 25 Jun 2021 15:43:45 +0200
Lucas Stach <l.st...@pengutronix.de> wrote:

> Am Freitag, dem 25.06.2021 um 15:33 +0200 schrieb Boris Brezillon:
> > If the process that submitted these jobs decided to close the FD before
> > the jobs are done, it probably means it doesn't care about the result.
> > 
> > v3:
> > * Set fence error to ECANCELED when a TERMINATED exception is received
> > 
> > Signed-off-by: Boris Brezillon <boris.brezil...@collabora.com>
> > ---
> >  drivers/gpu/drm/panfrost/panfrost_job.c | 43 +++++++++++++++++++++----
> >  1 file changed, 37 insertions(+), 6 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
> > index 948bd174ff99..aa1e6542adde 100644
> > --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> > +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> > @@ -498,14 +498,21 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
> >  
> >             if (status & JOB_INT_MASK_ERR(j)) {
> >                     u32 js_status = job_read(pfdev, JS_STATUS(j));
> > +                   const char *exception_name = panfrost_exception_name(js_status);
> >  
> >                     job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
> >  
> > -                   dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
> > -                           j,
> > -                           panfrost_exception_name(js_status),
> > -                           job_read(pfdev, JS_HEAD_LO(j)),
> > -                           job_read(pfdev, JS_TAIL_LO(j)));
> > +                   if (js_status < DRM_PANFROST_EXCEPTION_JOB_CONFIG_FAULT) {
> > +                           dev_dbg(pfdev->dev, "js interrupt, js=%d, status=%s, head=0x%x, tail=0x%x",
> > +                                   j, exception_name,
> > +                                   job_read(pfdev, JS_HEAD_LO(j)),
> > +                                   job_read(pfdev, JS_TAIL_LO(j)));
> > +                   } else {
> > +                           dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
> > +                                   j, exception_name,
> > +                                   job_read(pfdev, JS_HEAD_LO(j)),
> > +                                   job_read(pfdev, JS_TAIL_LO(j)));
> > +                   }
> >  
> >                     /* If we need a reset, signal it to the timeout
> >                      * handler, otherwise, update the fence error field and
> > @@ -514,7 +521,16 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
> >                     if (panfrost_exception_needs_reset(pfdev, js_status)) {
> >                             drm_sched_fault(&pfdev->js->queue[j].sched);
> >                     } else {
> > -                           dma_fence_set_error(pfdev->jobs[j]->done_fence, -EINVAL);
> > +                           int error = 0;
> > +
> > +                           if (js_status == DRM_PANFROST_EXCEPTION_TERMINATED)
> > +                                   error = -ECANCELED;
> > +                           else if (js_status >= DRM_PANFROST_EXCEPTION_JOB_CONFIG_FAULT)
> > +                                   error = -EINVAL;
> > +
> > +                           if (error)
> > +                                   dma_fence_set_error(pfdev->jobs[j]->done_fence, error);
> > +
> >                             status |= JOB_INT_MASK_DONE(j);
> >                     }
> >             }
> > @@ -673,10 +689,25 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
> >  
> >  void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
> >  {
> > +   struct panfrost_device *pfdev = panfrost_priv->pfdev;
> > +   unsigned long flags;
> >     int i;
> >  
> >     for (i = 0; i < NUM_JOB_SLOTS; i++)
> >             drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
> > +
> > +   /* Kill in-flight jobs */
> > +   spin_lock_irqsave(&pfdev->js->job_lock, flags);  
> 
> Micro-optimization, but this code is never called from IRQ context, so
> a spin_lock_irq would do here, no need to save/restore flags.

Ah, right, I moved patches around. This patch was before the 'move to
threaded-irq' one in v2, but now that it's coming after, we can use a
regular lock here.
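
For the record, a rough sketch of what the v4 close path could look like
with the lighter lock (assuming the threaded-irq patch goes in first, so
job_lock is never taken from hard-irq context; whether plain spin_lock()
or spin_lock_irq() is the better fit is still to be decided, the snippet
below just illustrates the spin_lock() variant):

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);

	/* Kill in-flight jobs: job_lock is now only taken from process or
	 * threaded-irq context, so no need to save/restore IRQ flags.
	 */
	spin_lock(&pfdev->js->job_lock);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];
		struct panfrost_job *job = pfdev->jobs[i];

		/* Only hard-stop jobs submitted by the closing context. */
		if (!job || job->base.entity != entity)
			continue;

		job_write(pfdev, JS_COMMAND(i), JS_COMMAND_HARD_STOP);
	}
	spin_unlock(&pfdev->js->job_lock);
}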

> 
> Regards,
> Lucas
> 
> > +   for (i = 0; i < NUM_JOB_SLOTS; i++) {
> > +           struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];
> > +           struct panfrost_job *job = pfdev->jobs[i];
> > +
> > +           if (!job || job->base.entity != entity)
> > +                   continue;
> > +
> > +           job_write(pfdev, JS_COMMAND(i), JS_COMMAND_HARD_STOP);
> > +   }
> > +   spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
> >  }
> >  
> >  int panfrost_job_is_idle(struct panfrost_device *pfdev)  
> 
> 
