From: Marek Olšák <marek.ol...@amd.com>

If we enqueue too many jobs and destroy the GL context, it may take
several seconds before the jobs finish. Just drop them instead.
---
 src/gallium/drivers/radeonsi/si_compute.c       | 3 ++-
 src/gallium/drivers/radeonsi/si_state_shaders.c | 5 +++--
 src/util/u_queue.c                              | 5 ++++-
 3 files changed, 9 insertions(+), 4 deletions(-)
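
For reviewers less familiar with u_queue, here is a minimal standalone
sketch of the technique the last hunk relies on. It is not Mesa code: the
names (struct job, struct fence, drop_job, worker) are invented for this
example, it uses plain pthreads with a single worker thread and a fixed-size
ring, the fence is modeled as an atomic bool, and enqueueing/teardown are
omitted for brevity. The point it illustrates: dropping a pending job runs
only its cleanup callback, zeroes the slot so the worker treats it as a
no-op, and signals the fence immediately, so a destroy path never has to
wait for the compile to actually run.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <string.h>

#define MAX_JOBS 8

struct fence { atomic_bool signalled; };

struct job {
   void *data;
   struct fence *fence;
   void (*execute)(void *data);   /* NULL once the job has been dropped */
   void (*cleanup)(void *data);
};

struct queue {
   pthread_mutex_t lock;
   pthread_cond_t has_job;
   struct job jobs[MAX_JOBS];     /* ring buffer, like util_queue */
   int read_idx, write_idx;
   bool kill;
};

/* Worker thread: pops jobs in order; a zeroed (dropped) slot is skipped. */
static void *worker(void *arg)
{
   struct queue *q = arg;
   for (;;) {
      pthread_mutex_lock(&q->lock);
      while (q->read_idx == q->write_idx && !q->kill)
         pthread_cond_wait(&q->has_job, &q->lock);
      if (q->read_idx == q->write_idx) {          /* killed and empty */
         pthread_mutex_unlock(&q->lock);
         return NULL;
      }
      struct job job = q->jobs[q->read_idx];
      memset(&q->jobs[q->read_idx], 0, sizeof(job));
      q->read_idx = (q->read_idx + 1) % MAX_JOBS;
      pthread_mutex_unlock(&q->lock);

      if (job.execute) {                          /* dropped jobs are no-ops */
         job.execute(job.data);
         if (job.cleanup)
            job.cleanup(job.data);
         atomic_store(&job.fence->signalled, true);
      }
   }
}

/* Drop a pending job: run only its cleanup, clear the slot, and signal the
 * fence so the caller can destroy its state right away. */
static void drop_job(struct queue *q, struct fence *fence)
{
   pthread_mutex_lock(&q->lock);
   for (int i = q->read_idx; i != q->write_idx; i = (i + 1) % MAX_JOBS) {
      if (q->jobs[i].fence == fence) {
         if (q->jobs[i].cleanup)
            q->jobs[i].cleanup(q->jobs[i].data);
         memset(&q->jobs[i], 0, sizeof(q->jobs[i]));
         atomic_store(&fence->signalled, true);
         break;
      }
   }
   pthread_mutex_unlock(&q->lock);
}

Unlike util_queue_drop_job, the sketch does not handle the case where the
job is no longer in the ring (e.g. a thread already picked it up); the real
function keeps the else path visible at the end of the last hunk for that.
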
diff --git a/src/gallium/drivers/radeonsi/si_compute.c b/src/gallium/drivers/radeonsi/si_compute.c
index 4c98066..0338b8a 100644
--- a/src/gallium/drivers/radeonsi/si_compute.c
+++ b/src/gallium/drivers/radeonsi/si_compute.c
@@ -853,21 +853,22 @@ static void si_launch_grid(
 
 static void si_delete_compute_state(struct pipe_context *ctx, void* state){
 	struct si_compute *program = (struct si_compute *)state;
 	struct si_context *sctx = (struct si_context*)ctx;
 
 	if (!state) {
 		return;
 	}
 
 	if (program->ir_type == PIPE_SHADER_IR_TGSI) {
-		util_queue_fence_wait(&program->ready);
+		util_queue_drop_job(&sctx->screen->shader_compiler_queue,
+				    &program->ready);
 		util_queue_fence_destroy(&program->ready);
 	}
 
 	if (program == sctx->cs_shader_state.program)
 		sctx->cs_shader_state.program = NULL;
 
 	if (program == sctx->cs_shader_state.emitted_program)
 		sctx->cs_shader_state.emitted_program = NULL;
 
 	si_shader_destroy(&program->shader);
diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c
index 8ac4309..62bb221 100644
--- a/src/gallium/drivers/radeonsi/si_state_shaders.c
+++ b/src/gallium/drivers/radeonsi/si_state_shaders.c
@@ -2251,21 +2251,22 @@ static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
 	sctx->do_update_shaders = true;
 	if (sel && sctx->ia_multi_vgt_param_key.u.uses_tess)
 		si_update_tess_uses_prim_id(sctx);
 	si_mark_atom_dirty(sctx, &sctx->cb_render_state);
 	si_set_active_descriptors_for_shader(sctx, sel);
 }
 
 static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
 {
 	if (shader->is_optimized) {
-		util_queue_fence_wait(&shader->optimized_ready);
+		util_queue_drop_job(&sctx->screen->shader_compiler_queue,
+				    &shader->optimized_ready);
 		util_queue_fence_destroy(&shader->optimized_ready);
 	}
 
 	if (shader->pm4) {
 		switch (shader->selector->type) {
 		case PIPE_SHADER_VERTEX:
 			if (shader->key.as_ls) {
 				assert(sctx->b.chip_class <= VI);
 				si_pm4_delete_state(sctx, ls, shader->pm4);
 			} else if (shader->key.as_es) {
@@ -2308,21 +2309,21 @@ static void si_destroy_shader_selector(struct si_context *sctx,
 {
 	struct si_shader *p = sel->first_variant, *c;
 	struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
 		[PIPE_SHADER_VERTEX] = &sctx->vs_shader,
 		[PIPE_SHADER_TESS_CTRL] = &sctx->tcs_shader,
 		[PIPE_SHADER_TESS_EVAL] = &sctx->tes_shader,
 		[PIPE_SHADER_GEOMETRY] = &sctx->gs_shader,
 		[PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
 	};
 
-	util_queue_fence_wait(&sel->ready);
+	util_queue_drop_job(&sctx->screen->shader_compiler_queue, &sel->ready);
 
 	if (current_shader[sel->type]->cso == sel) {
 		current_shader[sel->type]->cso = NULL;
 		current_shader[sel->type]->current = NULL;
 	}
 
 	while (p) {
 		c = p->next_variant;
 		si_delete_shader(sctx, p);
 		p = c;
diff --git a/src/util/u_queue.c b/src/util/u_queue.c
index 3834b6f..99de34c 100644
--- a/src/util/u_queue.c
+++ b/src/util/u_queue.c
@@ -346,21 +346,24 @@ util_queue_drop_job(struct util_queue *queue, struct util_queue_fence *fence)
 {
    bool removed = false;
 
    if (util_queue_fence_is_signalled(fence))
       return;
 
    mtx_lock(&queue->lock);
    for (unsigned i = queue->read_idx; i != queue->write_idx;
         i = (i + 1) % queue->max_jobs) {
       if (queue->jobs[i].fence == fence) {
-         /* Just clear it. The threads will drop it. */
+         if (queue->jobs[i].cleanup)
+            queue->jobs[i].cleanup(queue->jobs[i].job, -1);
+
+         /* Just clear it. The threads will treat it as a no-op job. */
          memset(&queue->jobs[i], 0, sizeof(queue->jobs[i]));
          removed = true;
          break;
       }
    }
    mtx_unlock(&queue->lock);
 
    if (removed)
       util_queue_fence_signal(fence);
    else
-- 
2.7.4

_______________________________________________
mesa-dev mailing list
mesa-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/mesa-dev