From: Christian König <christian.koe...@amd.com>

[ Upstream commit 3084cf46cf8110826a42de8c8ef30e8fa48974c2 ]

Setting the no_wait_gpu flag means that the allocated BO must be available
immediately and we can't wait for any GPU operation to finish.
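
For context, a minimal sketch of how a caller requests this behaviour
through ttm_operation_ctx (not taken from this patch; bo and placement are
assumed to be set up by the calling driver):

        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = true,    /* must not block on GPU work */
        };
        int r;

        r = ttm_bo_validate(bo, &placement, &ctx);
        if (r == -EBUSY) {
                /* Memory not immediately available; the caller decides
                 * whether to fall back to another domain or retry later.
                 */
        }

With this change such a request makes ttm_bo_mem_space() skip a placement
whose memory manager still has a move fence pending (ttm_bo_add_move_fence()
returns -EBUSY) instead of blocking on that fence.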

v2: squash in mem leak fix, rebase

Signed-off-by: Christian König <christian.koe...@amd.com>
Acked-by: Felix Kuehling <felix.kuehl...@amd.com>
Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 44 +++++++++++++++++++++---------------
 1 file changed, 26 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 98819462f025f..f078036998092 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -926,7 +926,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_mem_type_manager *man,
-                                struct ttm_mem_reg *mem)
+                                struct ttm_mem_reg *mem,
+                                bool no_wait_gpu)
 {
        struct dma_fence *fence;
        int ret;
@@ -935,19 +936,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
        fence = dma_fence_get(man->move);
        spin_unlock(&man->move_lock);
 
-       if (fence) {
-               dma_resv_add_shared_fence(bo->base.resv, fence);
+       if (!fence)
+               return 0;
 
-               ret = dma_resv_reserve_shared(bo->base.resv, 1);
-               if (unlikely(ret)) {
-                       dma_fence_put(fence);
-                       return ret;
-               }
+       if (no_wait_gpu)
+               return -EBUSY;
+
+       dma_resv_add_shared_fence(bo->base.resv, fence);
 
-               dma_fence_put(bo->moving);
-               bo->moving = fence;
+       ret = dma_resv_reserve_shared(bo->base.resv, 1);
+       if (unlikely(ret)) {
+               dma_fence_put(fence);
+               return ret;
        }
 
+       dma_fence_put(bo->moving);
+       bo->moving = fence;
        return 0;
 }
 
@@ -978,7 +982,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                        return ret;
        } while (1);
 
-       return ttm_bo_add_move_fence(bo, man, mem);
+       return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1120,14 +1124,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                if (unlikely(ret))
                        goto error;
 
-               if (mem->mm_node) {
-                       ret = ttm_bo_add_move_fence(bo, man, mem);
-                       if (unlikely(ret)) {
-                               (*man->func->put_node)(man, mem);
-                               goto error;
-                       }
-                       return 0;
+               if (!mem->mm_node)
+                       continue;
+
+               ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+               if (unlikely(ret)) {
+                       (*man->func->put_node)(man, mem);
+                       if (ret == -EBUSY)
+                               continue;
+
+                       goto error;
                }
+               return 0;
        }
 
        for (i = 0; i < placement->num_busy_placement; ++i) {
-- 
2.20.1
