Hi,

On Tue, 2026-03-31 at 11:46 +0200, Christian König wrote:
> On 3/31/26 11:20, Thomas Hellström wrote:
> > Drivers were accessing this drm_exec member directly.
> 
> I don't see a problem with that as long as we have documented that
> this is allowed.

It's more of a forward-looking change, for the case I mentioned in the
cover letter. If drm_exec becomes a subclass of a drm_transaction or
whatever, then this would likely become &exec->txn.ticket.

Could ofc postpone that to any such refactor, but since the patch is up
for review...

Thanks,
Thomas


> 
> Regards,
> Christian.
> 
> > Provide an accessor, drm_exec_ticket() to avoid that.
> > 
> > Signed-off-by: Thomas Hellström <[email protected]>
> > ---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4 ++--
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c           | 6 +++---
> >  drivers/gpu/drm/xe/xe_validation.c               | 4 ++--
> >  include/drm/drm_exec.h                           | 5 +++++
> >  4 files changed, 12 insertions(+), 7 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > index 29b400cdd6d5..8a4fb9a62485 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> > @@ -2998,7 +2998,7 @@ int
> > amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct
> > dma_fence __rcu *
> >     /* Validate PDs, PTs and evicted DMABuf imports last.
> > Otherwise BO
> >      * validations above would invalidate DMABuf imports
> > again.
> >      */
> > -   ret = process_validate_vms(process_info, &exec.ticket);
> > +   ret = process_validate_vms(process_info,
> > drm_exec_ticket(exec));
> >     if (ret) {
> >             pr_debug("Validating VMs failed, ret: %d\n", ret);
> >             goto validate_map_fail;
> > @@ -3039,7 +3039,7 @@ int
> > amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct
> > dma_fence __rcu *
> >                     goto validate_map_fail;
> >             }
> >  
> > -           ret = amdgpu_vm_handle_moved(adev, peer_vm,
> > &exec.ticket);
> > +           ret = amdgpu_vm_handle_moved(adev, peer_vm,
> > drm_exec_ticket(exec));
> >             if (ret) {
> >                     dev_dbg(adev->dev,
> >                             "Memory eviction: handle moved
> > failed, pid %8d. Try again.\n",
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> > index c4ee19603460..c725a7976c63 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
> > @@ -1157,7 +1157,7 @@ static int amdgpu_cs_vm_handling(struct
> > amdgpu_cs_parser *p)
> >                     return r;
> >     }
> >  
> > -   r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
> > +   r = amdgpu_vm_handle_moved(adev, vm, drm_exec_ticket(&p-
> > >exec));
> >     if (r)
> >             return r;
> >  
> > @@ -1358,7 +1358,7 @@ static int amdgpu_cs_submit(struct
> > amdgpu_cs_parser *p,
> >     cs->out.handle = seq;
> >     leader->uf_sequence = seq;
> >  
> > -   amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
> > +   amdgpu_vm_bo_trace_cs(&fpriv->vm, drm_exec_ticket(&p-
> > >exec));
> >     for (i = 0; i < p->gang_size; ++i) {
> >             amdgpu_job_free_resources(p->jobs[i]);
> >             trace_amdgpu_cs_ioctl(p->jobs[i]);
> > @@ -1793,7 +1793,7 @@ int amdgpu_cs_find_mapping(struct
> > amdgpu_cs_parser *parser,
> >     *map = mapping;
> >  
> >     /* Double check that the BO is reserved by this CS */
> > -   if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser-
> > >exec.ticket)
> > +   if (dma_resv_locking_ctx((*bo)->tbo.base.resv) !=
> > drm_exec_ticket(&parser->exec))
> >             return -EINVAL;
> >  
> >     /* Make sure VRAM is allocated contigiously */
> > diff --git a/drivers/gpu/drm/xe/xe_validation.c
> > b/drivers/gpu/drm/xe/xe_validation.c
> > index a611438eaafe..8dff4d0ec895 100644
> > --- a/drivers/gpu/drm/xe/xe_validation.c
> > +++ b/drivers/gpu/drm/xe/xe_validation.c
> > @@ -156,7 +156,7 @@ int xe_validation_ctx_init(struct
> > xe_validation_ctx *ctx, struct xe_validation_d
> >  
> >  #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
> >  /*
> > - * This abuses both drm_exec and ww_mutex internals and should be
> > + * This abuses ww_mutex internals and should be
> >   * replaced by checking for -EDEADLK when we can make TTM
> >   * stop converting -EDEADLK to -ENOMEM.
> >   * An alternative is to not have exhaustive eviction with
> > @@ -164,7 +164,7 @@ int xe_validation_ctx_init(struct
> > xe_validation_ctx *ctx, struct xe_validation_d
> >   */
> >  static bool xe_validation_contention_injected(struct drm_exec
> > *exec)
> >  {
> > -   return !!exec->ticket.contending_lock;
> > +   return !!drm_exec_ticket(exec)->contending_lock;
> >  }
> >  
> >  #else
> > diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
> > index 5ed5be1f8244..50d056a87de0 100644
> > --- a/include/drm/drm_exec.h
> > +++ b/include/drm/drm_exec.h
> > @@ -151,6 +151,11 @@ static inline bool
> > drm_exec_is_contended(struct drm_exec *exec)
> >             goto *__drm_exec_retry_ptr;             \
> >     } while (0)
> >  
> > +static inline struct ww_acquire_ctx *drm_exec_ticket(struct
> > drm_exec *exec)
> > +{
> > +   return &exec->ticket;
> > +}
> > +
> >  void drm_exec_init(struct drm_exec *exec, u32 flags, unsigned nr);
> >  void drm_exec_fini(struct drm_exec *exec);
> >  bool drm_exec_cleanup(struct drm_exec *exec);

Reply via email to