Rename 'map' to 'op' in struct drm_gpuvm_map_req for clarity and add corresponding documentation. No functional changes.
Fixes: baf1638c0956 ("drm/gpuvm: Introduce drm_gpuvm_madvise_ops_create") Fixes: 000a45dce7ad ("drm/gpuvm: Pass map arguments through a struct") Suggested-by: Boris Brezillon <bbrezil...@kernel.org> Suggested-by: Danilo Krummrich <d...@kernel.org> Cc: Danilo Krummrich <d...@kernel.org> Cc: Matt Coster <matt.cos...@imgtec.com> Cc: Boris Brezillon <bbrezil...@kernel.org> Cc: Rob Clark <robin.cl...@oss.qualcomm.com> Cc: Matthew Brost <matthew.br...@intel.com> Cc: <dri-devel@lists.freedesktop.org> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimi...@intel.com> --- drivers/gpu/drm/drm_gpuvm.c | 36 +++++++++++++------------- drivers/gpu/drm/imagination/pvr_vm.c | 8 +++--- drivers/gpu/drm/msm/msm_gem_vma.c | 16 ++++++------ drivers/gpu/drm/nouveau/nouveau_uvmm.c | 8 +++--- drivers/gpu/drm/panthor/panthor_mmu.c | 8 +++--- drivers/gpu/drm/xe/xe_vm.c | 8 +++--- include/drm/drm_gpuvm.h | 4 +-- 7 files changed, 44 insertions(+), 44 deletions(-) diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index 39f934a91a7b..e9aaf9b287e7 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -552,11 +552,11 @@ * struct drm_gem_object *obj, u64 offset) * { * struct drm_gpuvm_map_req map_req = { - * .map.va.addr = addr, - * .map.va.range = range, - * .map.gem.obj = obj, - * .map.gem.offset = offset, - * }; + * .op.va.addr = addr, + * .op.va.range = range, + * .op.gem.obj = obj, + * .op.gem.offset = offset, + * }; * struct drm_gpuva_ops *ops; * struct drm_gpuva_op *op * struct drm_gpuvm_bo *vm_bo; @@ -2132,10 +2132,10 @@ op_map_cb(const struct drm_gpuvm_ops *fn, void *priv, return 0; op.op = DRM_GPUVA_OP_MAP; - op.map.va.addr = req->map.va.addr; - op.map.va.range = req->map.va.range; - op.map.gem.obj = req->map.gem.obj; - op.map.gem.offset = req->map.gem.offset; + op.map.va.addr = req->op.va.addr; + op.map.va.range = req->op.va.range; + op.map.gem.obj = req->op.gem.obj; + op.map.gem.offset = req->op.gem.offset; return fn->sm_step_map(&op, 
priv); } @@ -2180,12 +2180,12 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, const struct drm_gpuvm_map_req *req, bool madvise) { - struct drm_gem_object *req_obj = req->map.gem.obj; + struct drm_gem_object *req_obj = req->op.gem.obj; const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req; struct drm_gpuva *va, *next; - u64 req_offset = req->map.gem.offset; - u64 req_range = req->map.va.range; - u64 req_addr = req->map.va.addr; + u64 req_offset = req->op.gem.offset; + u64 req_range = req->op.va.range; + u64 req_addr = req->op.va.addr; u64 req_end = req_addr + req_range; int ret; @@ -2272,8 +2272,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, if (madvise) { struct drm_gpuvm_map_req map_req = { - .map.va.addr = req_addr, - .map.va.range = end - req_addr, + .op.va.addr = req_addr, + .op.va.range = end - req_addr, }; ret = op_map_cb(ops, priv, &map_req); @@ -2340,8 +2340,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, if (madvise) { struct drm_gpuvm_map_req map_req = { - .map.va.addr = addr, - .map.va.range = req_end - addr, + .op.va.addr = addr, + .op.va.range = req_end - addr, }; return op_map_cb(ops, priv, &map_req); @@ -2583,7 +2583,7 @@ drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec, unsigned int num_fences, struct drm_gpuvm_map_req *req) { - struct drm_gem_object *req_obj = req->map.gem.obj; + struct drm_gem_object *req_obj = req->op.gem.obj; if (req_obj) { int ret = drm_exec_prepare_obj(exec, req_obj, num_fences); diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 3d97990170bf..983165eb3e6a 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -187,10 +187,10 @@ static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op) switch (bind_op->type) { case PVR_VM_BIND_TYPE_MAP: { const struct drm_gpuvm_map_req map_req = { - .map.va.addr = bind_op->device_addr, - .map.va.range = bind_op->size, - .map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj), 
- .map.gem.offset = bind_op->offset, + .op.va.addr = bind_op->device_addr, + .op.va.range = bind_op->size, + .op.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj), + .op.gem.offset = bind_op->offset, }; return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr, diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 210604181c05..9b5d003bc5a2 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -1179,10 +1179,10 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec) case MSM_VM_BIND_OP_MAP: case MSM_VM_BIND_OP_MAP_NULL: { struct drm_gpuvm_map_req map_req = { - .map.va.addr = op->iova, - .map.va.range = op->range, - .map.gem.obj = op->obj, - .map.gem.offset = op->obj_offset, + .op.va.addr = op->iova, + .op.va.range = op->range, + .op.gem.obj = op->obj, + .op.gem.offset = op->obj_offset, }; ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req); @@ -1296,10 +1296,10 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job) fallthrough; case MSM_VM_BIND_OP_MAP_NULL: { struct drm_gpuvm_map_req map_req = { - .map.va.addr = op->iova, - .map.va.range = op->range, - .map.gem.obj = op->obj, - .map.gem.offset = op->obj_offset, + .op.va.addr = op->iova, + .op.va.range = op->range, + .op.gem.obj = op->obj, + .op.gem.offset = op->obj_offset, }; ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req); diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index d94a85509176..314121a857e7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1277,10 +1277,10 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job, case OP_MAP: { struct nouveau_uvma_region *reg; struct drm_gpuvm_map_req map_req = { - .map.va.addr = op->va.addr, - .map.va.range = op->va.range, - .map.gem.obj = op->gem.obj, - .map.gem.offset = op->gem.offset, + .op.va.addr = op->va.addr, + .op.va.range = op->va.range, + .op.gem.obj = op->gem.obj, + 
.op.gem.offset = op->gem.offset, }; reg = nouveau_uvma_region_find_first(uvmm, diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 2003b91a8409..3799e2c6ea59 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -2204,10 +2204,10 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op, switch (op_type) { case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: { const struct drm_gpuvm_map_req map_req = { - .map.va.addr = op->va.addr, - .map.va.range = op->va.range, - .map.gem.obj = op->map.vm_bo->obj, - .map.gem.offset = op->map.bo_offset, + .op.va.addr = op->va.addr, + .op.va.range = op->va.range, + .op.gem.obj = op->map.vm_bo->obj, + .op.gem.offset = op->map.bo_offset, }; if (vm->unusable) { diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index f35d69c0b4c6..66b54b152446 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -2339,10 +2339,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops, case DRM_XE_VM_BIND_OP_MAP: case DRM_XE_VM_BIND_OP_MAP_USERPTR: { struct drm_gpuvm_map_req map_req = { - .map.va.addr = addr, - .map.va.range = range, - .map.gem.obj = obj, - .map.gem.offset = bo_offset_or_userptr, + .op.va.addr = addr, + .op.va.range = range, + .op.gem.obj = obj, + .op.gem.offset = bo_offset_or_userptr, }; ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req); diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index 4a22b9d848f7..751c96a817ed 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -1054,9 +1054,9 @@ struct drm_gpuva_ops { */ struct drm_gpuvm_map_req { /** - * @op_map: struct drm_gpuva_op_map + * @op: struct drm_gpuva_op_map */ - struct drm_gpuva_op_map map; + struct drm_gpuva_op_map op; }; struct drm_gpuva_ops * -- 2.34.1