When a map or unmap operation that leads to a remap intersects a GPU VA spanning a repeated range, the newly spawned VAs must preserve the repeated property; i.e., each VA's range must be a multiple of gem.repeat_range, and each VA's start address must fall on a gem.repeat_range boundary. When this doesn't hold, disallow the operation and notify user mode (UM) with an invalid-argument error.
Signed-off-by: Adrián Larumbe <[email protected]>
---
 drivers/gpu/drm/drm_gpuvm.c | 67 +++++++++++++++++++++++++++++++++++++
 include/drm/drm_gpuvm.h     |  7 +++-
 2 files changed, 73 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index ca7445f767fc..80750119221d 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -2462,6 +2462,65 @@ static int validate_map_request(struct drm_gpuvm *gpuvm,
 	return 0;
 }
 
+static int
+validate_repeated_unmap_request(struct drm_gpuvm *gpuvm,
+				u64 req_addr, u64 req_end)
+{
+	struct drm_gpuva *first, *last, *va;
+	u64 multiple;
+
+	if (!(gpuvm->flags & DRM_GPUVM_HAS_REPEAT_MAPS))
+		return 0;
+
+	/* Find the first and last VAs the map request intersects with */
+	first = last = NULL;
+	drm_gpuvm_for_each_va_range(va, gpuvm, req_addr, req_end) {
+		if (!first)
+			first = va;
+		last = va;
+	}
+
+	if (!first)
+		return 0;
+
+	if (first->flags & DRM_GPUVA_REPEAT) {
+		u64 addr = first->va.addr;
+		u64 range = first->va.range;
+		u64 end = addr + range;
+
+		drm_WARN_ON(gpuvm->drm, first->gem.repeat_range == 0);
+
+		if (addr < req_addr) {
+			multiple = req_addr;
+			if (do_div(multiple, first->gem.repeat_range))
+				return -EINVAL;
+		}
+
+		if (end > req_end) {
+			multiple = req_end;
+			if (do_div(multiple, first->gem.repeat_range))
+				return -EINVAL;
+			return 0;
+		}
+	}
+
+	if ((first != last) && (last->flags & DRM_GPUVA_REPEAT)) {
+		u64 addr = last->va.addr;
+		u64 range = last->va.range;
+		u64 end = addr + range;
+
+		drm_WARN_ON(gpuvm->drm, last->gem.repeat_range == 0);
+
+		if (end > req_end) {
+			multiple = req_end;
+			if (do_div(multiple, last->gem.repeat_range))
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static int
 __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
		   const struct drm_gpuvm_ops *ops, void *priv,
@@ -2479,6 +2538,10 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 	if (unlikely(ret))
 		return -EINVAL;
 
+	ret = validate_repeated_unmap_request(gpuvm, req_addr, req_end);
+	if (ret)
+		return ret;
+
 	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
 		struct drm_gem_object *obj = va->gem.obj;
 		u64 offset = va->gem.offset;
@@ -2653,6 +2716,10 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
 	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
 		return -EINVAL;
 
+	ret = validate_repeated_unmap_request(gpuvm, req_addr, req_end);
+	if (ret)
+		return ret;
+
 	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
 		struct drm_gpuva_op_map prev = {}, next = {};
 		bool prev_split = false, next_split = false;
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index cd2f55bc1707..61f66dfe4ed7 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -230,10 +230,15 @@ enum drm_gpuvm_flags {
 	 */
 	DRM_GPUVM_IMMEDIATE_MODE = BIT(1),
 
+	/**
+	 * @DRM_GPUVM_HAS_REPEAT_MAPS: There are repeated VAs in the GPUVM
+	 */
+	DRM_GPUVM_HAS_REPEAT_MAPS = BIT(2),
+
 	/**
 	 * @DRM_GPUVM_USERBITS: user defined bits
 	 */
-	DRM_GPUVM_USERBITS = BIT(2),
+	DRM_GPUVM_USERBITS = BIT(3),
 };
 
 /**
-- 
2.53.0
