Convert v3d from GEM-SHMEM to GEM-UMA. The latter is just a copy of the former, so this change merely renames symbols. No functional changes.

GEM-SHMEM will become more self-contained for drivers without specific memory management. GEM-UMA's interfaces will remain flexible for drivers with UMA hardware, such as v3d.

Signed-off-by: Thomas Zimmermann <[email protected]>
---
 drivers/gpu/drm/v3d/Kconfig   |  2 +-
 drivers/gpu/drm/v3d/v3d_bo.c  | 45 +++++++++++++++++------------------
 drivers/gpu/drm/v3d/v3d_drv.h |  4 ++--
 drivers/gpu/drm/v3d/v3d_mmu.c |  9 ++++---
 4 files changed, 29 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig
index ce62c5908e1d..4345cb0f4dd6 100644
--- a/drivers/gpu/drm/v3d/Kconfig
+++ b/drivers/gpu/drm/v3d/Kconfig
@@ -5,8 +5,8 @@ config DRM_V3D
 	depends on DRM
 	depends on COMMON_CLK
 	depends on MMU
+	select DRM_GEM_UMA_HELPER
 	select DRM_SCHED
-	select DRM_GEM_SHMEM_HELPER
 	help
 	  Choose this option if you have a system that has a Broadcom
 	  V3D 3.x or newer GPUs.  SoCs supported include the BCM2711,
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index d9547f5117b9..842881e5f9a3 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -5,7 +5,7 @@
  * DOC: V3D GEM BO management support
  *
  * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the
- * GPU and the bus, allowing us to use shmem objects for our storage
+ * GPU and the bus, allowing us to use UMA objects for our storage
  * instead of CMA.
  *
  * Physically contiguous objects may still be imported to V3D, but the
@@ -59,20 +59,20 @@ void v3d_free_object(struct drm_gem_object *obj)
 	/* GPU execution may have dirtied any pages in the BO. */
 	bo->base.pages_mark_dirty_on_put = true;
 
-	drm_gem_shmem_free(&bo->base);
+	drm_gem_uma_free(&bo->base);
 }
 
 static const struct drm_gem_object_funcs v3d_gem_funcs = {
 	.free = v3d_free_object,
-	.print_info = drm_gem_shmem_object_print_info,
-	.pin = drm_gem_shmem_object_pin,
-	.unpin = drm_gem_shmem_object_unpin,
-	.get_sg_table = drm_gem_shmem_object_get_sg_table,
-	.vmap = drm_gem_shmem_object_vmap,
-	.vunmap = drm_gem_shmem_object_vunmap,
-	.mmap = drm_gem_shmem_object_mmap,
+	.print_info = drm_gem_uma_object_print_info,
+	.pin = drm_gem_uma_object_pin,
+	.unpin = drm_gem_uma_object_unpin,
+	.get_sg_table = drm_gem_uma_object_get_sg_table,
+	.vmap = drm_gem_uma_object_vmap,
+	.vunmap = drm_gem_uma_object_vunmap,
+	.mmap = drm_gem_uma_object_mmap,
 	.status = v3d_gem_status,
-	.vm_ops = &drm_gem_shmem_vm_ops,
+	.vm_ops = &drm_gem_uma_vm_ops,
 };
 
 /* gem_create_object function for allocating a BO struct and doing
@@ -108,9 +108,9 @@ v3d_bo_create_finish(struct drm_gem_object *obj)
 	int ret;
 
 	/* So far we pin the BO in the MMU for its lifetime, so use
-	 * shmem's helper for getting a lifetime sgt.
+	 * UMA's helper for getting a lifetime sgt.
 	 */
-	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+	sgt = drm_gem_uma_get_pages_sgt(&bo->base);
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);
 
@@ -149,26 +149,25 @@ v3d_bo_create_finish(struct drm_gem_object *obj)
 struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
 			     size_t unaligned_size)
 {
-	struct drm_gem_shmem_object *shmem_obj;
+	struct drm_gem_uma_object *uma_obj;
 	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_bo *bo;
 	int ret;
 
-	shmem_obj = drm_gem_shmem_create_with_mnt(dev, unaligned_size,
-						  v3d->gemfs);
-	if (IS_ERR(shmem_obj))
-		return ERR_CAST(shmem_obj);
-	bo = to_v3d_bo(&shmem_obj->base);
+	uma_obj = drm_gem_uma_create_with_mnt(dev, unaligned_size, v3d->gemfs);
+	if (IS_ERR(uma_obj))
+		return ERR_CAST(uma_obj);
+	bo = to_v3d_bo(&uma_obj->base);
 	bo->vaddr = NULL;
 
-	ret = v3d_bo_create_finish(&shmem_obj->base);
+	ret = v3d_bo_create_finish(&uma_obj->base);
 	if (ret)
 		goto free_obj;
 
 	return bo;
 
 free_obj:
-	drm_gem_shmem_free(shmem_obj);
+	drm_gem_uma_free(uma_obj);
 	return ERR_PTR(ret);
 }
 
@@ -180,13 +179,13 @@ v3d_prime_import_sg_table(struct drm_device *dev,
 	struct drm_gem_object *obj;
 	int ret;
 
-	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+	obj = drm_gem_uma_prime_import_sg_table(dev, attach, sgt);
 	if (IS_ERR(obj))
 		return obj;
 
 	ret = v3d_bo_create_finish(obj);
 	if (ret) {
-		drm_gem_shmem_free(&to_v3d_bo(obj)->base);
+		drm_gem_uma_free(&to_v3d_bo(obj)->base);
 		return ERR_PTR(ret);
 	}
 
@@ -195,7 +194,7 @@ v3d_prime_import_sg_table(struct drm_device *dev,
 
 void v3d_get_bo_vaddr(struct v3d_bo *bo)
 {
-	struct drm_gem_shmem_object *obj = &bo->base;
+	struct drm_gem_uma_object *obj = &bo->base;
 
 	bo->vaddr = vmap(obj->pages, obj->base.size >> PAGE_SHIFT, VM_MAP,
 			 pgprot_writecombine(PAGE_KERNEL));
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 1884686985b8..3843d10fbd72 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -8,7 +8,7 @@
 
 #include <drm/drm_encoder.h>
 #include <drm/drm_gem.h>
-#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_uma_helper.h>
 #include <drm/gpu_scheduler.h>
 
 #include "v3d_performance_counters.h"
@@ -243,7 +243,7 @@ struct v3d_file_priv {
 };
 
 struct v3d_bo {
-	struct drm_gem_shmem_object base;
+	struct drm_gem_uma_object base;
 
 	struct drm_mm_node node;
 
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index a25d25a8ae61..a634ac3eaaf6 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -82,13 +82,13 @@ int v3d_mmu_set_page_table(struct v3d_dev *v3d)
 
 void v3d_mmu_insert_ptes(struct v3d_bo *bo)
 {
-	struct drm_gem_shmem_object *shmem_obj = &bo->base;
-	struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
+	struct drm_gem_uma_object *uma_obj = &bo->base;
+	struct v3d_dev *v3d = to_v3d_dev(uma_obj->base.dev);
 	u32 page = bo->node.start;
 	struct scatterlist *sgl;
 	unsigned int count;
 
-	for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, count) {
+	for_each_sgtable_dma_sg(uma_obj->sgt, sgl, count) {
 		dma_addr_t dma_addr = sg_dma_address(sgl);
 		u32 pfn = dma_addr >> V3D_MMU_PAGE_SHIFT;
 		unsigned int len = sg_dma_len(sgl);
@@ -121,8 +121,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
 		}
 	}
 
-	WARN_ON_ONCE(page - bo->node.start !=
-		     shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
+	WARN_ON_ONCE(page - bo->node.start != uma_obj->base.size >> V3D_MMU_PAGE_SHIFT);
 
 	if (v3d_mmu_flush_all(v3d))
 		dev_err(v3d->drm.dev, "MMU flush timeout\n");
-- 
2.52.0
