Add 'bool shmem' as the 4th argument of etnaviv_gem_new_private(), then
make etnaviv_gem_new_handle() call etnaviv_gem_new_private() to allocate
the etnaviv_gem_object instance for us.

A small benefit is reduced code duplication across the different etnaviv
GEM buffer object types. This allows us to reuse
etnaviv_gem_new_private() everywhere, increasing code reuse.

We should also call drm_gem_private_object_fini() to uninitialize an
already allocated GEM object when its initialization fails. Now
etnaviv_gem_new_private() handles this for us, so callers can simply use
it without having to worry about error handling.

If 'shmem' is true, drm_gem_object_init() will allocate backing storage
for us, making this a shmem buffer object. If false, the driver has to
implement its own backing storage.

Signed-off-by: Sui Jingfeng <sui.jingf...@linux.dev>
---
 drivers/gpu/drm/etnaviv/etnaviv_gem.c       | 28 +++++++++++++--------
 drivers/gpu/drm/etnaviv/etnaviv_gem.h       |  4 ++-
 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c |  2 +-
 3 files changed, 22 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c 
b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 3732288ff530..27e4a93c981c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -697,21 +697,20 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct 
drm_file *file,
        u32 size, u32 flags, u32 *handle)
 {
        struct etnaviv_drm_private *priv = to_etnaviv_priv(dev);
-       struct drm_gem_object *obj = NULL;
+       struct etnaviv_gem_object *etnaviv_obj;
+       struct drm_gem_object *obj;
        int ret;
 
        size = PAGE_ALIGN(size);
 
-       ret = etnaviv_gem_new_impl(dev, flags, &etnaviv_gem_shmem_ops, &obj);
+       ret = etnaviv_gem_new_private(dev, size, flags, true,
+                                     &etnaviv_gem_shmem_ops, &etnaviv_obj);
        if (ret)
                goto fail;
 
-       lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
-
-       ret = drm_gem_object_init(dev, obj, size);
-       if (ret)
-               goto fail;
+       lockdep_set_class(&etnaviv_obj->lock, &etnaviv_shm_lock_class);
 
+       obj = &etnaviv_obj->base;
        /*
         * Our buffers are kept pinned, so allocating them from the MOVABLE
         * zone is a really bad idea, and conflicts with CMA. See comments
@@ -732,7 +731,8 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct 
drm_file *file,
 }
 
 int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
-       const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
+                           bool shmem, const struct etnaviv_gem_ops *ops,
+                           struct etnaviv_gem_object **res)
 {
        struct drm_gem_object *obj;
        int ret;
@@ -741,7 +741,15 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t 
size, u32 flags,
        if (ret)
                return ret;
 
-       drm_gem_private_object_init(dev, obj, size);
+       if (shmem) {
+               ret = drm_gem_object_init(dev, obj, size);
+               if (ret) {
+                       drm_gem_private_object_fini(obj);
+                       return ret;
+               }
+       } else {
+               drm_gem_private_object_init(dev, obj, size);
+       }
 
        *res = to_etnaviv_bo(obj);
 
@@ -830,7 +838,7 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct 
drm_file *file,
        struct etnaviv_gem_object *etnaviv_obj;
        int ret;
 
-       ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
+       ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, false,
                                      &etnaviv_gem_userptr_ops, &etnaviv_obj);
        if (ret)
                return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h 
b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index f2ac64d8e90b..b174a9e4cc48 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -118,7 +118,9 @@ void etnaviv_submit_put(struct etnaviv_gem_submit * submit);
 int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
        struct drm_etnaviv_timespec *timeout);
 int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
-       const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res);
+                           bool shmem, const struct etnaviv_gem_ops *ops,
+                           struct etnaviv_gem_object **res);
+
 void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
 struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
 void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c 
b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 0062d808d6a9..64a858a0b0cf 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -107,7 +107,7 @@ struct drm_gem_object 
*etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
        size_t size = PAGE_ALIGN(attach->dmabuf->size);
        int ret, npages;
 
-       ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC,
+       ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC, false,
                                      &etnaviv_gem_prime_ops, &etnaviv_obj);
        if (ret < 0)
                return ERR_PTR(ret);
-- 
2.43.0

Reply via email to