From: Nicolai Hähnle <nicolai.haeh...@amd.com>

We never add fences to backing buffers during submit. When we free a
backing buffer, it must inherit the sparse buffer's fences, so that it
doesn't get re-used prematurely via the cache.
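(For readers following along: backing BOs never get fences at submit time, so the parent sparse buffer's fences have to be copied onto the backing BO at the moment it is freed; otherwise the reuse cache could hand it out again while the GPU may still be touching those pages. Below is a minimal standalone sketch of that idea. The types and helpers in it, fence_t, buffer_t, buffer_inherit_fences and cache_release, are made up for illustration and are not the winsys API.)

/*
 * Standalone illustration only. All names here are hypothetical and
 * are not the Mesa winsys API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
   unsigned id; /* stand-in for a kernel fence handle */
} fence_t;

typedef struct {
   fence_t *fences;     /* fences that must signal before the buffer may be reused */
   unsigned num_fences;
} buffer_t;

/*
 * Copy the parent's fences onto the backing buffer, so a reuse cache will
 * not hand the backing buffer out again while the GPU may still access it.
 */
static void buffer_inherit_fences(buffer_t *backing, const buffer_t *parent)
{
   fence_t *tmp;

   if (!parent->num_fences)
      return;

   tmp = realloc(backing->fences,
                 (backing->num_fences + parent->num_fences) * sizeof(*tmp));
   if (!tmp)
      return; /* a real implementation would handle allocation failure */

   backing->fences = tmp;
   memcpy(backing->fences + backing->num_fences, parent->fences,
          parent->num_fences * sizeof(*tmp));
   backing->num_fences += parent->num_fences;
}

static void cache_release(const buffer_t *backing)
{
   /* a real cache would check these fences before handing the buffer out again */
   printf("released backing buffer with %u inherited fence(s)\n",
          backing->num_fences);
}

int main(void)
{
   fence_t sparse_fences[] = { { 1 }, { 2 } };
   buffer_t sparse = { sparse_fences, 2 }; /* sparse buffer with live fences */
   buffer_t backing = { NULL, 0 };         /* backing buffer: never fenced at submit */

   buffer_inherit_fences(&backing, &sparse); /* the step this patch adds on free */
   cache_release(&backing);

   free(backing.fences);
   return 0;
}

(In the actual patch the equivalent step is the amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences) call taken under ws->bo_fence_lock in sparse_free_backing_buffer().)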
v2: - remove pipe_mutex_*
---
 src/gallium/winsys/amdgpu/drm/amdgpu_bo.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index ec84058..567399d 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -660,24 +660,31 @@ sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_
    if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
       memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
               sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
       best_backing->num_chunks--;
    }
 
    return best_backing;
 }
 
 static void
-sparse_free_backing_buffer(struct amdgpu_sparse_backing *backing)
+sparse_free_backing_buffer(struct amdgpu_winsys_bo *bo,
+                           struct amdgpu_sparse_backing *backing)
 {
+   struct amdgpu_winsys *ws = backing->bo->ws;
+
    bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;
 
+   mtx_lock(&ws->bo_fence_lock);
+   amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
+   mtx_unlock(&ws->bo_fence_lock);
+
    list_del(&backing->list);
    amdgpu_winsys_bo_reference(&backing->bo, NULL);
    FREE(backing->chunks);
    FREE(backing);
 }
 
 /*
  * Return a range of pages from the given backing buffer back into the
  * free structure.
  */
@@ -730,42 +737,43 @@ sparse_backing_free(struct amdgpu_winsys_bo *bo,
       memmove(&backing->chunks[low + 1], &backing->chunks[low],
               sizeof(*backing->chunks) * (backing->num_chunks - low));
       backing->chunks[low].begin = start_page;
       backing->chunks[low].end = end_page;
       backing->num_chunks++;
    }
 
    if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
        backing->chunks[0].end == backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE)
-      sparse_free_backing_buffer(backing);
+      sparse_free_backing_buffer(bo, backing);
 
    return true;
 }
 
 static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
 {
    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
    int r;
 
    assert(!bo->bo && bo->sparse);
 
    r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
                            (uint64_t)bo->u.sparse.num_va_pages * RADEON_SPARSE_PAGE_SIZE,
                            bo->va, 0, AMDGPU_VA_OP_CLEAR);
    if (r) {
       fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n",
               r);
    }
 
    while (!list_empty(&bo->u.sparse.backing)) {
       struct amdgpu_sparse_backing *dummy = NULL;
-      sparse_free_backing_buffer(container_of(bo->u.sparse.backing.next,
+      sparse_free_backing_buffer(bo,
+                                 container_of(bo->u.sparse.backing.next,
                                               dummy, list));
    }
 
    amdgpu_va_range_free(bo->u.sparse.va_handle);
    mtx_destroy(&bo->u.sparse.commit_lock);
    FREE(bo->u.sparse.commitments);
    FREE(bo);
 }
 
 static const struct pb_vtbl amdgpu_winsys_bo_sparse_vtbl = {
-- 
2.9.3

_______________________________________________
mesa-dev mailing list
mesa-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/mesa-dev