On 13.06.2016 at 18:33, Daniel Vetter wrote:
> On Mon, Jun 13, 2016 at 04:12:43PM +0200, Christian König wrote:
>> From: Christian König <christian.koenig at amd.com>
>>
>> A regular spin_lock/unlock should do here as well.
>>
>> Signed-off-by: Christian König <christian.koenig at amd.com>
> Just a drive-by comment, but you can't mix up irq spinlocks with normal
> ones. And there are places where this is still irqsave, but some of these
> functions can be called directly from the scheduler kthread. You can only
> drop the _irqsave if you know you're always called from hardirq context
> (softirq isn't enough), or when you know someone already disabled
> interrupts. And you can simplify the _irqsave to just _irq when you are
> always in process context and irqs are always still enabled.
>
> On a super-quick look seems fishy.
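
For reference, the rule boils down to roughly the following. This is only a
minimal sketch; the lock and the functions are made up purely for
illustration:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* The lock is also taken from an interrupt handler and this caller may
 * run with interrupts already disabled or still enabled: save/restore. */
static void any_context_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* The lock is taken from an interrupt handler, but this caller always
 * runs in process context with interrupts enabled: _irq is enough. */
static void process_context_path(void)
{
	spin_lock_irq(&example_lock);
	/* ... */
	spin_unlock_irq(&example_lock);
}

/* The lock is never taken from interrupt context at all (or interrupts
 * are already known to be disabled): a plain spin_lock will do. */
static void no_irq_users_path(void)
{
	spin_lock(&example_lock);
	/* ... */
	spin_unlock(&example_lock);
}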

The point is that there isn't even any IRQ involved in any of this.

The lock is only taken from a work item or the kthread, never from irq
context.
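
To make that concrete, the two paths taking the lock look roughly like the
sketch below (heavily simplified, not the actual scheduler code; names are
invented for illustration):

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Simplified stand-in for the scheduler state. */
struct toy_sched {
	spinlock_t		job_list_lock;
	struct list_head	ring_mirror_list;
	struct work_struct	finish_work;
};

/* Runs from a workqueue worker: process context, interrupts enabled. */
static void toy_job_finish(struct work_struct *work)
{
	struct toy_sched *sched = container_of(work, struct toy_sched,
					       finish_work);

	spin_lock(&sched->job_list_lock);
	/* ... remove the finished job from ring_mirror_list ... */
	spin_unlock(&sched->job_list_lock);
}

/* Runs from the scheduler kthread: also process context, interrupts
 * enabled. */
static int toy_sched_main(void *data)
{
	struct toy_sched *sched = data;

	while (!kthread_should_stop()) {
		spin_lock(&sched->job_list_lock);
		/* ... inspect ring_mirror_list ... */
		spin_unlock(&sched->job_list_lock);
	}
	return 0;
}

/* No interrupt handler ever takes job_list_lock, so an interrupt arriving
 * while the lock is held can never try to acquire it again and deadlock;
 * the plain spin_lock()/spin_unlock() pair is sufficient. */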

Christian.

> -Daniel
>
>> ---
>>   drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 14 ++++++--------
>>   1 file changed, 6 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
>> index b1d49c5..e13b140 100644
>> --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
>> +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
>> @@ -331,17 +331,16 @@ static void amd_sched_job_finish(struct work_struct *work)
>>      struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
>>                                                 finish_work);
>>      struct amd_gpu_scheduler *sched = s_job->sched;
>> -    unsigned long flags;
>>   
>>      /* remove job from ring_mirror_list */
>> -    spin_lock_irqsave(&sched->job_list_lock, flags);
>> +    spin_lock(&sched->job_list_lock);
>>      list_del_init(&s_job->node);
>>      if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
>>              struct amd_sched_job *next;
>>   
>> -            spin_unlock_irqrestore(&sched->job_list_lock, flags);
>> +            spin_unlock(&sched->job_list_lock);
>>              cancel_delayed_work_sync(&s_job->work_tdr);
>> -            spin_lock_irqsave(&sched->job_list_lock, flags);
>> +            spin_lock(&sched->job_list_lock);
>>   
>>              /* queue TDR for next job */
>>              next = list_first_entry_or_null(&sched->ring_mirror_list,
>> @@ -350,7 +349,7 @@ static void amd_sched_job_finish(struct work_struct *work)
>>              if (next)
>>                      schedule_delayed_work(&next->work_tdr, sched->timeout);
>>      }
>> -    spin_unlock_irqrestore(&sched->job_list_lock, flags);
>> +    spin_unlock(&sched->job_list_lock);
>>      sched->ops->free_job(s_job);
>>   }
>>   
>> @@ -364,15 +363,14 @@ static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
>>   static void amd_sched_job_begin(struct amd_sched_job *s_job)
>>   {
>>      struct amd_gpu_scheduler *sched = s_job->sched;
>> -    unsigned long flags;
>>   
>> -    spin_lock_irqsave(&sched->job_list_lock, flags);
>> +    spin_lock(&sched->job_list_lock);
>>      list_add_tail(&s_job->node, &sched->ring_mirror_list);
>>      if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
>>          list_first_entry_or_null(&sched->ring_mirror_list,
>>                                   struct amd_sched_job, node) == s_job)
>>              schedule_delayed_work(&s_job->work_tdr, sched->timeout);
>> -    spin_unlock_irqrestore(&sched->job_list_lock, flags);
>> +    spin_unlock(&sched->job_list_lock);
>>   }
>>   
>>   static void amd_sched_job_timedout(struct work_struct *work)
>> -- 
>> 2.5.0
>>
>> _______________________________________________
>> dri-devel mailing list
>> dri-devel at lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/dri-devel
