Convert ivpu from GEM-SHMEM to GEM-UMA. The latter is just a copy of the former, so this change merely renames symbols. No functional changes.
GEM-SHMEM will become more self-contained for drivers without specific memory management. GEM-UMA's interfaces will remain flexible for drivers with UMA hardware, such as ivpu. Signed-off-by: Thomas Zimmermann <[email protected]> --- drivers/accel/ivpu/Kconfig | 2 +- drivers/accel/ivpu/ivpu_gem.c | 36 +++++++++++++++++------------------ drivers/accel/ivpu/ivpu_gem.h | 4 ++-- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig index 9e055b5ce03d..49ca139a9d31 100644 --- a/drivers/accel/ivpu/Kconfig +++ b/drivers/accel/ivpu/Kconfig @@ -5,8 +5,8 @@ config DRM_ACCEL_IVPU depends on DRM_ACCEL depends on X86_64 && !UML depends on PCI && PCI_MSI + select DRM_GEM_UMA_HELPER select FW_LOADER - select DRM_GEM_SHMEM_HELPER select GENERIC_ALLOCATOR select WANT_DEV_COREDUMP help diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c index ece68f570b7e..7f4aeb482efb 100644 --- a/drivers/accel/ivpu/ivpu_gem.c +++ b/drivers/accel/ivpu/ivpu_gem.c @@ -84,7 +84,7 @@ int __must_check ivpu_bo_bind(struct ivpu_bo *bo) if (bo->base.base.import_attach) sgt = ivpu_bo_map_attachment(vdev, bo); else - sgt = drm_gem_shmem_get_pages_sgt(&bo->base); + sgt = drm_gem_uma_get_pages_sgt(&bo->base); if (IS_ERR(sgt)) { ret = PTR_ERR(sgt); ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret); @@ -223,7 +223,7 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, get_dma_buf(dma_buf); - obj = drm_gem_shmem_prime_import_sg_table(dev, attach, NULL); + obj = drm_gem_uma_prime_import_sg_table(dev, attach, NULL); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto fail_detach; @@ -251,7 +251,7 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags) { - struct drm_gem_shmem_object *shmem; + struct drm_gem_uma_object *uma; struct ivpu_bo *bo; switch (flags & DRM_IVPU_BO_CACHE_MASK) { @@ -262,11 +262,11 @@ static 
struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla return ERR_PTR(-EINVAL); } - shmem = drm_gem_shmem_create(&vdev->drm, size); - if (IS_ERR(shmem)) - return ERR_CAST(shmem); + uma = drm_gem_uma_create(&vdev->drm, size); + if (IS_ERR(uma)) + return ERR_CAST(uma); - bo = to_ivpu_bo(&shmem->base); + bo = to_ivpu_bo(&uma->base); bo->base.map_wc = flags & DRM_IVPU_BO_WC; bo->flags = flags; @@ -330,7 +330,7 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj) drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1); drm_WARN_ON(obj->dev, bo->base.base.vma_node.vm_files.rb_node); - drm_gem_shmem_free(&bo->base); + drm_gem_uma_free(&bo->base); } static enum drm_gem_object_status ivpu_gem_status(struct drm_gem_object *obj) @@ -347,15 +347,15 @@ static enum drm_gem_object_status ivpu_gem_status(struct drm_gem_object *obj) static const struct drm_gem_object_funcs ivpu_gem_funcs = { .free = ivpu_gem_bo_free, .open = ivpu_gem_bo_open, - .print_info = drm_gem_shmem_object_print_info, - .pin = drm_gem_shmem_object_pin, - .unpin = drm_gem_shmem_object_unpin, - .get_sg_table = drm_gem_shmem_object_get_sg_table, - .vmap = drm_gem_shmem_object_vmap, - .vunmap = drm_gem_shmem_object_vunmap, - .mmap = drm_gem_shmem_object_mmap, + .print_info = drm_gem_uma_object_print_info, + .pin = drm_gem_uma_object_pin, + .unpin = drm_gem_uma_object_unpin, + .get_sg_table = drm_gem_uma_object_get_sg_table, + .vmap = drm_gem_uma_object_vmap, + .vunmap = drm_gem_uma_object_vunmap, + .mmap = drm_gem_uma_object_mmap, .status = ivpu_gem_status, - .vm_ops = &drm_gem_shmem_vm_ops, + .vm_ops = &drm_gem_uma_vm_ops, }; int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) @@ -435,7 +435,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, if (flags & DRM_IVPU_BO_MAPPABLE) { ivpu_bo_lock(bo); - ret = drm_gem_shmem_vmap_locked(&bo->base, &map); + ret = drm_gem_uma_vmap_locked(&bo->base, &map); ivpu_bo_unlock(bo); if 
(ret) @@ -475,7 +475,7 @@ void ivpu_bo_free(struct ivpu_bo *bo) if (bo->flags & DRM_IVPU_BO_MAPPABLE) { ivpu_bo_lock(bo); - drm_gem_shmem_vunmap_locked(&bo->base, &map); + drm_gem_uma_vunmap_locked(&bo->base, &map); ivpu_bo_unlock(bo); } diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h index 0c3350f22b55..3e5d1a64deab 100644 --- a/drivers/accel/ivpu/ivpu_gem.h +++ b/drivers/accel/ivpu/ivpu_gem.h @@ -6,13 +6,13 @@ #define __IVPU_GEM_H__ #include <drm/drm_gem.h> -#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_gem_uma_helper.h> #include <drm/drm_mm.h> struct ivpu_file_priv; struct ivpu_bo { - struct drm_gem_shmem_object base; + struct drm_gem_uma_object base; struct ivpu_mmu_context *ctx; struct list_head bo_list_node; struct drm_mm_node mm_node; -- 2.52.0
