vm_op_enqueue() allocates a struct msm_vm_op with kmalloc(), but the
return value is not checked for NULL, which kmalloc() can return under
low-memory conditions. The pointer is then dereferenced unconditionally,
resulting in a NULL pointer dereference.
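For illustration, the shape of the bug and of the fix in isolation (a
minimal sketch with placeholder names, not the driver code itself; the
change below applies the same pattern to vm_op_enqueue()):

        /* Needs <linux/slab.h> and <linux/list.h>. */
        struct item {
                struct list_head node;
                int payload;
        };

        static int enqueue_item(struct list_head *queue, const struct item *src)
        {
                struct item *it = kmalloc(sizeof(*it), GFP_KERNEL);

                /* kmalloc() can return NULL; dereferencing it would oops. */
                if (!it)
                        return -ENOMEM;

                *it = *src;     /* safe only after the NULL check */
                list_add_tail(&it->node, queue);
                return 0;
        }

Since the function can now fail, every caller has to check the returned
int and unwind, which is what the ret handling in the hunks below does.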
Add a NULL check after the allocation and propagate -ENOMEM back to the
caller on failure.

Signed-off-by: Gopi Krishna Menon <krishnagopi...@gmail.com>
---
 drivers/gpu/drm/msm/msm_gem_vma.c | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 00d0f3b7ba32..639425849d86 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -455,15 +455,20 @@ struct op_arg {
         bool kept;
 };
 
-static void
+static int
 vm_op_enqueue(struct op_arg *arg, struct msm_vm_op _op)
 {
         struct msm_vm_op *op = kmalloc(sizeof(*op), GFP_KERNEL);
+        if (!op)
+                return -ENOMEM;
+
         *op = _op;
         list_add_tail(&op->node, &arg->job->vm_ops);
 
         if (op->obj)
                 drm_gem_object_get(op->obj);
+
+        return 0;
 }
 
 static struct drm_gpuva *
@@ -482,6 +487,7 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
         struct drm_gpuva *vma;
         struct sg_table *sgt;
         unsigned prot;
+        int ret;
 
         if (arg->kept)
                 return 0;
@@ -493,8 +499,6 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
         vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
                vma->va.addr, vma->va.range);
 
-        vma->flags = ((struct op_arg *)arg)->flags;
-
         if (obj) {
                 sgt = to_msm_bo(obj)->sgt;
                 prot = msm_gem_prot(obj);
@@ -503,7 +507,7 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
                 prot = IOMMU_READ | IOMMU_WRITE;
         }
 
-        vm_op_enqueue(arg, (struct msm_vm_op){
+        ret = vm_op_enqueue(arg, (struct msm_vm_op){
                 .op = MSM_VM_OP_MAP,
                 .map = {
                         .sgt = sgt,
@@ -516,6 +520,10 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
                 .obj = vma->gem.obj,
         });
 
+        if (ret)
+                return ret;
+
+        vma->flags = ((struct op_arg *)arg)->flags;
         to_msm_vma(vma)->mapped = true;
 
         return 0;
@@ -531,6 +539,7 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
         struct drm_gpuvm_bo *vm_bo = orig_vma->vm_bo;
         bool mapped = to_msm_vma(orig_vma)->mapped;
         unsigned flags;
+        int ret;
 
         vm_dbg("orig_vma: %p:%p:%p: %016llx %016llx", vm, orig_vma,
                orig_vma->gem.obj, orig_vma->va.addr, orig_vma->va.range);
@@ -540,7 +549,7 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
 
                 drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
 
-                vm_op_enqueue(arg, (struct msm_vm_op){
+                ret = vm_op_enqueue(arg, (struct msm_vm_op){
                         .op = MSM_VM_OP_UNMAP,
                         .unmap = {
                                 .iova = unmap_start,
@@ -550,6 +559,9 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
                         .obj = orig_vma->gem.obj,
                 });
 
+                if (ret)
+                        return ret;
+
                 /*
                  * Part of this GEM obj is still mapped, but we're going to kill the
                  * existing VMA and replace it with one or two new ones (ie. two if
@@ -611,6 +623,7 @@ msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
         struct msm_vm_bind_job *job = arg->job;
         struct drm_gpuva *vma = op->unmap.va;
         struct msm_gem_vma *msm_vma = to_msm_vma(vma);
+        int ret;
 
         vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
                vma->va.addr, vma->va.range);
@@ -643,7 +656,7 @@ msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
         if (!msm_vma->mapped)
                 goto out_close;
 
-        vm_op_enqueue(arg, (struct msm_vm_op){
+        ret = vm_op_enqueue(arg, (struct msm_vm_op){
                 .op = MSM_VM_OP_UNMAP,
                 .unmap = {
                         .iova = vma->va.addr,
@@ -653,6 +666,9 @@ msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
                 .obj = vma->gem.obj,
         });
 
+        if (ret)
+                return ret;
+
         msm_vma->mapped = false;
 
 out_close:
--
2.43.0