drm/amdgpu: Use drm_gem_objects_lookup() instead of open coding it

Replace the open coded GEM handle lookup loops in amdgpu_userq_wait_ioctl()
with the existing drm_gem_objects_lookup() helper, which copies the handle
arrays from userspace and looks up all the objects in one call. This removes
the separate bo_handles_read/bo_handles_write copies and simplifies the
error unwind path.

Signed-off-by: Tvrtko Ursulin <[email protected]>
---
 .../gpu/drm/amd/amdgpu/amdgpu_userq_fence.c   | 79 ++++++-------------
 1 file changed, 23 insertions(+), 56 deletions(-)
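
For reference, the helper being switched to has roughly the following
prototype (taken from drivers/gpu/drm/drm_gem.c; treat the exact signature
as indicative and double check against your tree):

        int drm_gem_objects_lookup(struct drm_file *filp,
                                   void __user *bo_handles, int count,
                                   struct drm_gem_object ***objs_out);

It copies the u32 handle array from userspace, allocates the returned object
array and takes a reference on every object it looks up. Since the
put_gobj_read/put_gobj_write labels are only reached once the corresponding
lookup has fully succeeded, the rentry/wentry partial unwind can become
plain loops over the full handle counts.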

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index c28abc556907..7aa26e88c8ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -602,41 +602,26 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
 int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
 {
-       u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
-       u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
-       struct drm_amdgpu_userq_fence_info *fence_info = NULL;
        struct drm_amdgpu_userq_wait *wait_info = data;
+       const unsigned int num_write_bo_handles = wait_info->num_bo_write_handles;
+       const unsigned int num_read_bo_handles = wait_info->num_bo_read_handles;
+       struct drm_amdgpu_userq_fence_info *fence_info = NULL;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+       struct drm_gem_object **gobj_write, **gobj_read;
+       u32 *timeline_points, *timeline_handles;
        struct amdgpu_usermode_queue *waitq;
-       struct drm_gem_object **gobj_write;
-       struct drm_gem_object **gobj_read;
+       u32 *syncobj_handles, num_syncobj;
        struct dma_fence **fences = NULL;
        u16 num_points, num_fences = 0;
-       int r, i, rentry, wentry, cnt;
        struct drm_exec exec;
-
-       num_read_bo_handles = wait_info->num_bo_read_handles;
-       bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
-                                     size_mul(sizeof(u32), num_read_bo_handles));
-       if (IS_ERR(bo_handles_read))
-               return PTR_ERR(bo_handles_read);
-
-       num_write_bo_handles = wait_info->num_bo_write_handles;
-       bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
-                                      size_mul(sizeof(u32), num_write_bo_handles));
-       if (IS_ERR(bo_handles_write)) {
-               r = PTR_ERR(bo_handles_write);
-               goto free_bo_handles_read;
-       }
+       int r, i, cnt;
 
        num_syncobj = wait_info->num_syncobj_handles;
        syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
                                      size_mul(sizeof(u32), num_syncobj));
-       if (IS_ERR(syncobj_handles)) {
-               r = PTR_ERR(syncobj_handles);
-               goto free_bo_handles_write;
-       }
+       if (IS_ERR(syncobj_handles))
+               return PTR_ERR(syncobj_handles);
 
        num_points = wait_info->num_syncobj_timeline_handles;
        timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
@@ -653,33 +638,19 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
                goto free_timeline_handles;
        }
 
-       gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
-       if (!gobj_read) {
-               r = -ENOMEM;
+       r = drm_gem_objects_lookup(filp,
+                                  u64_to_user_ptr(wait_info->bo_read_handles),
+                                  num_read_bo_handles,
+                                  &gobj_read);
+       if (r)
                goto free_timeline_points;
-       }
 
-       for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
-               gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
-               if (!gobj_read[rentry]) {
-                       r = -ENOENT;
-                       goto put_gobj_read;
-               }
-       }
-
-       gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
-       if (!gobj_write) {
-               r = -ENOMEM;
+       r = drm_gem_objects_lookup(filp,
+                                  u64_to_user_ptr(wait_info->bo_write_handles),
+                                  num_write_bo_handles,
+                                  &gobj_write);
+       if (r)
                goto put_gobj_read;
-       }
-
-       for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
-               gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
-               if (!gobj_write[wentry]) {
-                       r = -ENOENT;
-                       goto put_gobj_write;
-               }
-       }
 
        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
                      (num_read_bo_handles + num_write_bo_handles));
@@ -936,12 +907,12 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
 exec_fini:
        drm_exec_fini(&exec);
 put_gobj_write:
-       while (wentry-- > 0)
-               drm_gem_object_put(gobj_write[wentry]);
+       for (i = 0; i < num_write_bo_handles; i++)
+               drm_gem_object_put(gobj_write[i]);
        kfree(gobj_write);
 put_gobj_read:
-       while (rentry-- > 0)
-               drm_gem_object_put(gobj_read[rentry]);
+       for (i = 0; i < num_read_bo_handles; i++)
+               drm_gem_object_put(gobj_read[i]);
        kfree(gobj_read);
 free_timeline_points:
        kfree(timeline_points);
@@ -949,10 +920,6 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
        kfree(timeline_handles);
 free_syncobj_handles:
        kfree(syncobj_handles);
-free_bo_handles_write:
-       kfree(bo_handles_write);
-free_bo_handles_read:
-       kfree(bo_handles_read);
 
        return r;
 }
-- 
2.51.1
