Convert imagination from GEM-SHMEM to GEM-UMA. The latter is currently
just a copy of the former, so this change merely renames symbols. No
functional changes.

GEM-SHMEM will become more self-contained for drivers without specific
memory-management needs, while GEM-UMA's interfaces will remain flexible
for drivers of UMA hardware, such as imagination.

Signed-off-by: Thomas Zimmermann <[email protected]>
---
 drivers/gpu/drm/imagination/Kconfig         |  4 +-
 drivers/gpu/drm/imagination/pvr_drv.c       |  2 +-
 drivers/gpu/drm/imagination/pvr_free_list.c |  2 +-
 drivers/gpu/drm/imagination/pvr_gem.c       | 74 ++++++++++-----------
 drivers/gpu/drm/imagination/pvr_gem.h       | 14 ++--
 5 files changed, 48 insertions(+), 48 deletions(-)
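
For reviewers: the conversion is mechanical. Below is a minimal sketch of
the driver-side pattern after this patch, assuming GEM-UMA keeps GEM-SHMEM's
embedding and calling conventions one-to-one (the commit message above states
it is currently a copy). The struct and function names mirror the pvr_gem.h
changes; my_gem_object and my_gem_create are hypothetical and only
illustrate the pattern, not code from this series.

#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/types.h>

#include <drm/drm_device.h>
#include <drm/drm_gem_uma_helper.h>

/* Hypothetical driver object; like pvr_gem_object, it embeds the UMA
 * GEM object at offset 0 so the cast helpers remain simple pointer
 * conversions.
 */
struct my_gem_object {
	struct drm_gem_uma_object base;
	u64 flags;
};

static struct my_gem_object *my_gem_create(struct drm_device *drm, size_t size)
{
	struct drm_gem_uma_object *uma_obj;

	/* Same calling convention as drm_gem_shmem_create(). */
	uma_obj = drm_gem_uma_create(drm, size);
	if (IS_ERR(uma_obj))
		return ERR_CAST(uma_obj);

	return container_of(uma_obj, struct my_gem_object, base);
}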

diff --git a/drivers/gpu/drm/imagination/Kconfig b/drivers/gpu/drm/imagination/Kconfig
index 0482bfcefdde..ee796c9cfdf2 100644
--- a/drivers/gpu/drm/imagination/Kconfig
+++ b/drivers/gpu/drm/imagination/Kconfig
@@ -9,9 +9,9 @@ config DRM_POWERVR
        depends on PM
        depends on POWER_SEQUENCING || !POWER_SEQUENCING
        select DRM_EXEC
-       select DRM_GEM_SHMEM_HELPER
-       select DRM_SCHED
+       select DRM_GEM_UMA_HELPER
        select DRM_GPUVM
+       select DRM_SCHED
        select FW_LOADER
        help
          Choose this option if you have a system that has an Imagination
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index 916b40ced7eb..61bcbbef208c 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -1392,7 +1392,7 @@ static struct drm_driver pvr_drm_driver = {
        .minor = PVR_DRIVER_MINOR,
        .patchlevel = PVR_DRIVER_PATCHLEVEL,
 
-       .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+       .gem_prime_import_sg_table = drm_gem_uma_prime_import_sg_table,
        .gem_create_object = pvr_gem_create_object,
 };
 
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c
index 5228e214491c..5b43f7ca2a6c 100644
--- a/drivers/gpu/drm/imagination/pvr_free_list.c
+++ b/drivers/gpu/drm/imagination/pvr_free_list.c
@@ -281,7 +281,7 @@ pvr_free_list_insert_node_locked(struct pvr_free_list_node *free_list_node)
        offset = (start_page * FREE_LIST_ENTRY_SIZE) &
                  ~((u64)ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE - 1);
 
-       sgt = drm_gem_shmem_get_pages_sgt(&free_list_node->mem_obj->base);
+       sgt = drm_gem_uma_get_pages_sgt(&free_list_node->mem_obj->base);
        if (WARN_ON(IS_ERR(sgt)))
                return PTR_ERR(sgt);
 
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
index a66cf082af24..f29c9808f4e2 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.c
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -25,30 +25,30 @@
 
 static void pvr_gem_object_free(struct drm_gem_object *obj)
 {
-       drm_gem_shmem_object_free(obj);
+       drm_gem_uma_object_free(obj);
 }
 
 static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
 {
        struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);
-       struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
+       struct drm_gem_uma_object *uma_obj = uma_gem_from_pvr_gem(pvr_obj);
 
        if (!(pvr_obj->flags & DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS))
                return -EINVAL;
 
-       return drm_gem_shmem_mmap(shmem_obj, vma);
+       return drm_gem_uma_mmap(uma_obj, vma);
 }
 
 static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
        .free = pvr_gem_object_free,
-       .print_info = drm_gem_shmem_object_print_info,
-       .pin = drm_gem_shmem_object_pin,
-       .unpin = drm_gem_shmem_object_unpin,
-       .get_sg_table = drm_gem_shmem_object_get_sg_table,
-       .vmap = drm_gem_shmem_object_vmap,
-       .vunmap = drm_gem_shmem_object_vunmap,
+       .print_info = drm_gem_uma_object_print_info,
+       .pin = drm_gem_uma_object_pin,
+       .unpin = drm_gem_uma_object_unpin,
+       .get_sg_table = drm_gem_uma_object_get_sg_table,
+       .vmap = drm_gem_uma_object_vmap,
+       .vunmap = drm_gem_uma_object_vunmap,
        .mmap = pvr_gem_mmap,
-       .vm_ops = &drm_gem_shmem_vm_ops,
+       .vm_ops = &drm_gem_uma_vm_ops,
 };
 
 /**
@@ -195,25 +195,25 @@ pvr_gem_object_from_handle(struct pvr_file *pvr_file, u32 handle)
 void *
 pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj)
 {
-       struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
+       struct drm_gem_uma_object *uma_obj = uma_gem_from_pvr_gem(pvr_obj);
        struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
        struct iosys_map map;
        int err;
 
        dma_resv_lock(obj->resv, NULL);
 
-       err = drm_gem_shmem_vmap_locked(shmem_obj, &map);
+       err = drm_gem_uma_vmap_locked(uma_obj, &map);
        if (err)
                goto err_unlock;
 
        if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
-               struct device *dev = shmem_obj->base.dev->dev;
+               struct device *dev = uma_obj->base.dev->dev;
 
-               /* If shmem_obj->sgt is NULL, that means the buffer hasn't been mapped
+               /* If uma_obj->sgt is NULL, that means the buffer hasn't been mapped
                 * in GPU space yet.
                 */
-               if (shmem_obj->sgt)
-                       dma_sync_sgtable_for_cpu(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+               if (uma_obj->sgt)
+                       dma_sync_sgtable_for_cpu(dev, uma_obj->sgt, DMA_BIDIRECTIONAL);
        }
 
        dma_resv_unlock(obj->resv);
@@ -237,8 +237,8 @@ pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj)
 void
 pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
 {
-       struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
-       struct iosys_map map = IOSYS_MAP_INIT_VADDR(shmem_obj->vaddr);
+       struct drm_gem_uma_object *uma_obj = uma_gem_from_pvr_gem(pvr_obj);
+       struct iosys_map map = IOSYS_MAP_INIT_VADDR(uma_obj->vaddr);
        struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
 
        if (WARN_ON(!map.vaddr))
@@ -247,16 +247,16 @@ pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
        dma_resv_lock(obj->resv, NULL);
 
        if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
-               struct device *dev = shmem_obj->base.dev->dev;
+               struct device *dev = uma_obj->base.dev->dev;
 
-               /* If shmem_obj->sgt is NULL, that means the buffer hasn't been mapped
+               /* If uma_obj->sgt is NULL, that means the buffer hasn't been mapped
                 * in GPU space yet.
                 */
-               if (shmem_obj->sgt)
-                       dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+               if (uma_obj->sgt)
+                       dma_sync_sgtable_for_device(dev, uma_obj->sgt, DMA_BIDIRECTIONAL);
        }
 
-       drm_gem_shmem_vunmap_locked(shmem_obj, &map);
+       drm_gem_uma_vunmap_locked(uma_obj, &map);
 
        dma_resv_unlock(obj->resv);
 }
@@ -336,7 +336,7 @@ struct pvr_gem_object *
 pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
 {
        struct drm_device *drm_dev = from_pvr_device(pvr_dev);
-       struct drm_gem_shmem_object *shmem_obj;
+       struct drm_gem_uma_object *uma_obj;
        struct pvr_gem_object *pvr_obj;
        struct sg_table *sgt;
        int err;
@@ -348,19 +348,19 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
        if (device_get_dma_attr(drm_dev->dev) == DEV_DMA_COHERENT)
                flags |= PVR_BO_CPU_CACHED;
 
-       shmem_obj = drm_gem_shmem_create(drm_dev, size);
-       if (IS_ERR(shmem_obj))
-               return ERR_CAST(shmem_obj);
+       uma_obj = drm_gem_uma_create(drm_dev, size);
+       if (IS_ERR(uma_obj))
+               return ERR_CAST(uma_obj);
 
-       shmem_obj->pages_mark_dirty_on_put = true;
-       shmem_obj->map_wc = !(flags & PVR_BO_CPU_CACHED);
-       pvr_obj = shmem_gem_to_pvr_gem(shmem_obj);
+       uma_obj->pages_mark_dirty_on_put = true;
+       uma_obj->map_wc = !(flags & PVR_BO_CPU_CACHED);
+       pvr_obj = uma_gem_to_pvr_gem(uma_obj);
        pvr_obj->flags = flags;
 
-       sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
+       sgt = drm_gem_uma_get_pages_sgt(uma_obj);
        if (IS_ERR(sgt)) {
                err = PTR_ERR(sgt);
-               goto err_shmem_object_free;
+               goto err_uma_object_free;
        }
 
        dma_sync_sgtable_for_device(drm_dev->dev, sgt, DMA_BIDIRECTIONAL);
@@ -373,8 +373,8 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
 
        return pvr_obj;
 
-err_shmem_object_free:
-       drm_gem_shmem_free(shmem_obj);
+err_uma_object_free:
+       drm_gem_uma_free(uma_obj);
 
        return ERR_PTR(err);
 }
@@ -394,13 +394,13 @@ int
 pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset,
                     dma_addr_t *dma_addr_out)
 {
-       struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
+       struct drm_gem_uma_object *uma_obj = uma_gem_from_pvr_gem(pvr_obj);
        u32 accumulated_offset = 0;
        struct scatterlist *sgl;
        unsigned int sgt_idx;
 
-       WARN_ON(!shmem_obj->sgt);
-       for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, sgt_idx) {
+       WARN_ON(!uma_obj->sgt);
+       for_each_sgtable_dma_sg(uma_obj->sgt, sgl, sgt_idx) {
                u32 new_offset = accumulated_offset + sg_dma_len(sgl);
 
                if (offset >= accumulated_offset && offset < new_offset) {
diff --git a/drivers/gpu/drm/imagination/pvr_gem.h b/drivers/gpu/drm/imagination/pvr_gem.h
index c99f30cc6208..59223876b3f7 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.h
+++ b/drivers/gpu/drm/imagination/pvr_gem.h
@@ -10,7 +10,7 @@
 #include <uapi/drm/pvr_drm.h>
 
 #include <drm/drm_gem.h>
-#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_uma_helper.h>
 #include <drm/drm_mm.h>
 
 #include <linux/bitfield.h>
@@ -82,12 +82,12 @@ struct pvr_file;
  */
 struct pvr_gem_object {
        /**
-        * @base: The underlying &struct drm_gem_shmem_object.
+        * @base: The underlying &struct drm_gem_uma_object.
         *
         * Do not access this member directly, instead call
-        * shem_gem_from_pvr_gem().
+        * uma_gem_from_pvr_gem().
         */
-       struct drm_gem_shmem_object base;
+       struct drm_gem_uma_object base;
 
        /**
         * @flags: Options set at creation-time. Some of these options apply to
@@ -111,9 +111,9 @@ struct pvr_gem_object {
 static_assert(offsetof(struct pvr_gem_object, base) == 0,
              "offsetof(struct pvr_gem_object, base) not zero");
 
-#define shmem_gem_from_pvr_gem(pvr_obj) (&(pvr_obj)->base)
+#define uma_gem_from_pvr_gem(pvr_obj) (&(pvr_obj)->base)
 
-#define shmem_gem_to_pvr_gem(shmem_obj) container_of_const(shmem_obj, struct pvr_gem_object, base)
+#define uma_gem_to_pvr_gem(uma_obj) container_of_const(uma_obj, struct pvr_gem_object, base)
 
 #define gem_from_pvr_gem(pvr_obj) (&(pvr_obj)->base.base)
 
@@ -134,7 +134,7 @@ struct pvr_gem_object *pvr_gem_object_from_handle(struct pvr_file *pvr_file,
 static __always_inline struct sg_table *
 pvr_gem_object_get_pages_sgt(struct pvr_gem_object *pvr_obj)
 {
-       return drm_gem_shmem_get_pages_sgt(shmem_gem_from_pvr_gem(pvr_obj));
+       return drm_gem_uma_get_pages_sgt(uma_gem_from_pvr_gem(pvr_obj));
 }
 
 void *pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj);
-- 
2.52.0
