This allows one to pre-allocate resources on a sparse BO to avoid
faulting when the GPU accesses the memory region. Will be used by
the heap logic to pre-populate a heap object with a predefined number
of chunks.

Signed-off-by: Boris Brezillon <boris.brezil...@collabora.com>
---
 drivers/gpu/drm/panthor/panthor_mmu.c | 25 +++++++++++++++++++++++++
 drivers/gpu/drm/panthor/panthor_mmu.h |  2 ++
 2 files changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index e05aaac10481..aea9b5f2ce64 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1770,6 +1770,31 @@ static int panthor_vm_map_on_demand_locked(struct panthor_vm *vm,
        return 0;
 }
 
+int panthor_vm_pre_fault_range(struct panthor_vm *vm, u64 iova, u64 size,
+                              gfp_t page_gfp, gfp_t other_gfp)
+{
+       struct panthor_gem_object *bo = NULL;
+       struct drm_gpuva *gpuva;
+       struct panthor_vma *vma;
+       int ret;
+
+       mutex_lock(&vm->op_lock);
+       gpuva = drm_gpuva_find_first(&vm->base, iova, 1);
+       vma = gpuva ? container_of(gpuva, struct panthor_vma, base) : NULL;
+       if (vma && vma->base.gem.obj)
+               bo = to_panthor_bo(vma->base.gem.obj);
+
+       if (bo && (bo->flags & DRM_PANTHOR_BO_ALLOC_ON_FAULT)) {
+               ret = panthor_vm_map_on_demand_locked(vm, vma, iova - vma->base.va.addr,
+                                                     size, page_gfp, other_gfp);
+       } else {
+               ret = -EFAULT;
+       }
+       mutex_unlock(&vm->op_lock);
+
+       return ret;
+}
+
 static void panthor_vm_handle_fault_locked(struct panthor_vm *vm)
 {
        struct panthor_device *ptdev = vm->ptdev;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index fc274637114e..d57c86d293bd 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -28,6 +28,8 @@ int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
 int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size);
 struct panthor_gem_object *
 panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);
+int panthor_vm_pre_fault_range(struct panthor_vm *vm, u64 iova, u64 size,
+                              gfp_t page_gfp, gfp_t other_gfp);
 
 int panthor_vm_active(struct panthor_vm *vm);
 void panthor_vm_idle(struct panthor_vm *vm);
-- 
2.49.0

Reply via email to