To reduce later churn, move the throttling-related parameters (throttle_batch[], need_swap_throttle, need_flush_throttle, disable_throttling, and always_flush_batch, renamed to always_flush) from the general brw_context into the intel_batchbuffer.
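For context, the comment this patch relocates describes a two-slot throttle: throttle_batch[0] pins the first batch submitted after the most recent SwapBuffers, throttle_batch[1] the corresponding batch from the swap before that, and waiting on [1] caps rendering at one frame behind the user. The stand-alone model below is purely illustrative (the fake_* types and helpers are stand-ins, not the driver's API), but it mirrors the flow that throttle() implements:

/* Illustrative sketch only: a self-contained model of the two-slot
 * swap-throttle scheme kept in brw_batch.  fake_* names are stand-ins
 * for brw_bo, drm_intel_bo_wait_rendering(), brw_bo_put(), etc.
 */
#include <stdbool.h>
#include <stddef.h>

struct fake_bo { int id; };                 /* stand-in for brw_bo */

struct fake_batch {
   struct fake_bo *throttle_batch[2];       /* [0] = first batch after the last swap,
                                             * [1] = first batch after the swap before that */
   bool need_swap_throttle;
   bool need_flush_throttle;
   bool disable_throttling;
};

static void fake_bo_wait(struct fake_bo *bo) { (void)bo; /* block until the GPU is done */ }
static void fake_bo_put(struct fake_bo *bo)  { (void)bo; /* drop a reference */ }

/* Called before submitting a new batch: mirrors throttle(). */
static void fake_throttle(struct fake_batch *batch)
{
   if (batch->need_swap_throttle && batch->throttle_batch[0]) {
      /* Wait for the frame before last, so we never run more than one
       * frame ahead of the user. */
      if (batch->throttle_batch[1]) {
         if (!batch->disable_throttling)
            fake_bo_wait(batch->throttle_batch[1]);
         fake_bo_put(batch->throttle_batch[1]);
      }
      batch->throttle_batch[1] = batch->throttle_batch[0];
      batch->throttle_batch[0] = NULL;
      batch->need_swap_throttle = false;
      /* This wait is more precise than the throttle ioctl, so skip it. */
      batch->need_flush_throttle = false;
   }
}

int main(void)
{
   struct fake_bo frame1 = { 1 }, frame2 = { 2 };
   struct fake_batch batch = { { NULL, NULL }, false, false, false };

   /* First frame: flush records the batch, swap requests throttling. */
   batch.throttle_batch[0] = &frame1;
   batch.need_swap_throttle = true;
   fake_throttle(&batch);               /* nothing to wait on yet */

   /* Second frame: now we wait on frame1 before going further ahead. */
   batch.throttle_batch[0] = &frame2;
   batch.need_swap_throttle = true;
   fake_throttle(&batch);
   return 0;
}

As the two fake_throttle() calls show, the wait only kicks in once a second frame is in flight, which is exactly the one-frame latency cap the comment describes.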
Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 src/mesa/drivers/dri/i965/brw_batch.h         | 20 +++++++++++++++++
 src/mesa/drivers/dri/i965/brw_compute.c       |  2 +-
 src/mesa/drivers/dri/i965/brw_context.c       |  9 +++-----
 src/mesa/drivers/dri/i965/brw_context.h       | 32 ++++++---------------------
 src/mesa/drivers/dri/i965/brw_draw.c          |  2 +-
 src/mesa/drivers/dri/i965/intel_batchbuffer.c | 29 +++++++++++++-----------
 src/mesa/drivers/dri/i965/intel_screen.c      |  4 ++--
 7 files changed, 50 insertions(+), 48 deletions(-)

diff --git a/src/mesa/drivers/dri/i965/brw_batch.h b/src/mesa/drivers/dri/i965/brw_batch.h
index 9df23e63d6..6c207b8a9b 100644
--- a/src/mesa/drivers/dri/i965/brw_batch.h
+++ b/src/mesa/drivers/dri/i965/brw_batch.h
@@ -70,6 +70,26 @@ typedef struct brw_batch {
 
    dri_bufmgr *bufmgr;
 
+   /** Framerate throttling: @{ */
+   brw_bo *throttle_batch[2];
+
+   /* Limit the number of outstanding SwapBuffers by waiting for an earlier
+    * frame of rendering to complete. This gives a very precise cap to the
+    * latency between input and output such that rendering never gets more
+    * than a frame behind the user. (With the caveat that we technically are
+    * not using the SwapBuffers itself as a barrier but the first batch
+    * submitted afterwards, which may be immediately prior to the next
+    * SwapBuffers.)
+    */
+   bool need_swap_throttle;
+
+   /** General throttling, not caught by throttling between SwapBuffers */
+   bool need_flush_throttle;
+   /** @} */
+
+   bool always_flush;
+   bool disable_throttling;
+
    /**
     * Set of brw_bo* that have been rendered to within this batchbuffer
     * and would need flushing before being used from another cache domain that
diff --git a/src/mesa/drivers/dri/i965/brw_compute.c b/src/mesa/drivers/dri/i965/brw_compute.c
index 208bf0fed8..277a4cd198 100644
--- a/src/mesa/drivers/dri/i965/brw_compute.c
+++ b/src/mesa/drivers/dri/i965/brw_compute.c
@@ -236,7 +236,7 @@ brw_dispatch_compute_common(struct gl_context *ctx)
     */
    brw_compute_state_finished(brw);
 
-   if (brw->always_flush_batch)
+   if (brw->batch.always_flush)
       intel_batchbuffer_flush(brw);
 
    brw_program_cache_check_size(brw);
diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c
index ba16004ca9..f680371f76 100644
--- a/src/mesa/drivers/dri/i965/brw_context.c
+++ b/src/mesa/drivers/dri/i965/brw_context.c
@@ -409,7 +409,7 @@ intel_glFlush(struct gl_context *ctx)
 
    intel_flush_front(ctx);
 
-   brw->need_flush_throttle = true;
+   brw->batch.need_flush_throttle = true;
 }
 
 static void
@@ -892,7 +892,7 @@ brw_process_driconf_options(struct brw_context *brw)
 
    if (driQueryOptionb(options, "always_flush_batch")) {
       fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
-      brw->always_flush_batch = true;
+      brw->batch.always_flush = true;
    }
 
    if (driQueryOptionb(options, "always_flush_cache")) {
@@ -902,7 +902,7 @@ brw_process_driconf_options(struct brw_context *brw)
 
    if (driQueryOptionb(options, "disable_throttling")) {
       fprintf(stderr, "disabling flush throttling\n");
-      brw->disable_throttling = true;
+      brw->batch.disable_throttling = true;
    }
 
    brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");
@@ -1197,9 +1197,6 @@ intelDestroyContext(__DRIcontext * driContextPriv)
 
    brw_fini_pipe_control(brw);
    intel_batchbuffer_free(&brw->batch);
-
-   brw_bo_put(brw->throttle_batch[1]);
-   brw_bo_put(brw->throttle_batch[0]);
 
    driDestroyOptionCache(&brw->optionCache);
 
   /* free the Mesa context */
diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
index bf41be55ed..e602c413c2 100644
--- a/src/mesa/drivers/dri/i965/brw_context.h
+++ b/src/mesa/drivers/dri/i965/brw_context.h
@@ -722,33 +722,22 @@ struct brw_context
     */
    bool front_buffer_dirty;
 
-   /** Framerate throttling: @{ */
-   brw_bo *throttle_batch[2];
-
-   /* Limit the number of outstanding SwapBuffers by waiting for an earlier
-    * frame of rendering to complete. This gives a very precise cap to the
-    * latency between input and output such that rendering never gets more
-    * than a frame behind the user. (With the caveat that we technically are
-    * not using the SwapBuffers itself as a barrier but the first batch
-    * submitted afterwards, which may be immediately prior to the next
-    * SwapBuffers.)
-    */
-   bool need_swap_throttle;
-
-   /** General throttling, not caught by throttling between SwapBuffers */
-   bool need_flush_throttle;
-   /** @} */
-
    GLuint stats_wm;
 
    /**
+    * Set if we're either a debug context or the INTEL_DEBUG=perf environment
+    * variable is set, this is the flag indicating to do expensive work that
+    * might lead to a perf_debug() call.
+    */
+   bool perf_debug;
+
+   /**
    * drirc options:
    * @{
    */
   bool no_rast;
   bool always_flush_batch;
   bool always_flush_cache;
-   bool disable_throttling;
   bool precompile;
   bool dual_color_blend_by_location;
 
@@ -759,13 +748,6 @@ struct brw_context
 
   GLenum reduced_primitive;
 
-   /**
-    * Set if we're either a debug context or the INTEL_DEBUG=perf environment
-    * variable is set, this is the flag indicating to do expensive work that
-    * might lead to a perf_debug() call.
-    */
-   bool perf_debug;
-
   uint64_t max_gtt_map_object_size;
 
   int gen;
diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c
index c497e4b3e8..f3699d4ecf 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.c
+++ b/src/mesa/drivers/dri/i965/brw_draw.c
@@ -617,7 +617,7 @@ retry:
       brw_render_state_finished(brw);
    }
 
-   if (brw->always_flush_batch)
+   if (brw->batch.always_flush)
       intel_batchbuffer_flush(brw);
 
    brw_program_cache_check_size(brw);
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index 35b6161fad..d62fd06a72 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -104,6 +104,9 @@ intel_batchbuffer_free(struct brw_batch *batch)
    free(batch->cpu_map);
    brw_bo_put(batch->last_bo);
    brw_bo_put(batch->bo);
+
+   brw_bo_put(batch->throttle_batch[1]);
+   brw_bo_put(batch->throttle_batch[0]);
 }
 
 void
@@ -284,23 +287,23 @@ throttle(struct brw_context *brw)
     * the swap, and getting our hands on that doesn't seem worth it,
     * so we just use the first batch we emitted after the last swap.
     */
-   if (brw->need_swap_throttle && brw->throttle_batch[0]) {
-      if (brw->throttle_batch[1]) {
-         if (!brw->disable_throttling)
-            drm_intel_bo_wait_rendering(brw->throttle_batch[1]);
-         brw_bo_put(brw->throttle_batch[1]);
+   if (brw->batch.need_swap_throttle && brw->batch.throttle_batch[0]) {
+      if (brw->batch.throttle_batch[1]) {
+         if (!brw->batch.disable_throttling)
+            drm_intel_bo_wait_rendering(brw->batch.throttle_batch[1]);
+         brw_bo_put(brw->batch.throttle_batch[1]);
       }
-      brw->throttle_batch[1] = brw->throttle_batch[0];
-      brw->throttle_batch[0] = NULL;
-      brw->need_swap_throttle = false;
+      brw->batch.throttle_batch[1] = brw->batch.throttle_batch[0];
+      brw->batch.throttle_batch[0] = NULL;
+      brw->batch.need_swap_throttle = false;
       /* Throttling here is more precise than the throttle ioctl, so skip it */
-      brw->need_flush_throttle = false;
+      brw->batch.need_flush_throttle = false;
    }
 
-   if (brw->need_flush_throttle) {
+   if (brw->batch.need_flush_throttle) {
       __DRIscreen *dri_screen = brw->screen->driScrnPriv;
       drmCommandNone(dri_screen->fd, DRM_I915_GEM_THROTTLE);
-      brw->need_flush_throttle = false;
+      brw->batch.need_flush_throttle = false;
    }
 }
 
@@ -380,8 +383,8 @@ _intel_batchbuffer_flush(struct brw_context *brw,
    if (USED_BATCH(brw->batch) == 0)
       return 0;
 
-   if (brw->throttle_batch[0] == NULL)
-      brw->throttle_batch[0] = brw_bo_get(brw->batch.bo);
+   if (brw->batch.throttle_batch[0] == NULL)
+      brw->batch.throttle_batch[0] = brw_bo_get(brw->batch.bo);
 
    if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
       int bytes_for_commands = 4 * USED_BATCH(brw->batch);
diff --git a/src/mesa/drivers/dri/i965/intel_screen.c b/src/mesa/drivers/dri/i965/intel_screen.c
index b3f2787005..acbe9948bf 100644
--- a/src/mesa/drivers/dri/i965/intel_screen.c
+++ b/src/mesa/drivers/dri/i965/intel_screen.c
@@ -180,9 +180,9 @@ intel_dri2_flush_with_flags(__DRIcontext *cPriv,
    intel_resolve_for_dri2_flush(brw, dPriv);
 
    if (reason == __DRI2_THROTTLE_SWAPBUFFER)
-      brw->need_swap_throttle = true;
+      brw->batch.need_swap_throttle = true;
    if (reason == __DRI2_THROTTLE_FLUSHFRONT)
-      brw->need_flush_throttle = true;
+      brw->batch.need_flush_throttle = true;
 
    intel_batchbuffer_flush(brw);
 
-- 
2.11.0
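The other half of the move is the reference bookkeeping: _intel_batchbuffer_flush() pins the first batch emitted after a swap with brw_bo_get(), and teardown now drops both slots from intel_batchbuffer_free() instead of intelDestroyContext(). A toy refcounting model of that lifecycle, again only a sketch with stand-in toy_* names rather than driver code:

/* Sketch only: models the throttle_batch reference lifecycle with a toy
 * refcounted buffer object; toy_* names stand in for brw_bo/brw_bo_get/brw_bo_put. */
#include <assert.h>
#include <stdlib.h>

struct toy_bo { int refcount; };

static struct toy_bo *toy_bo_get(struct toy_bo *bo) { if (bo) bo->refcount++; return bo; }
static void toy_bo_put(struct toy_bo *bo) { if (bo && --bo->refcount == 0) free(bo); }

struct toy_batch {
   struct toy_bo *bo;                 /* the batch currently being built */
   struct toy_bo *throttle_batch[2];  /* batches pinned for swap throttling */
};

/* Mirrors _intel_batchbuffer_flush(): pin the first batch after a swap. */
static void toy_flush(struct toy_batch *batch)
{
   if (batch->throttle_batch[0] == NULL)
      batch->throttle_batch[0] = toy_bo_get(batch->bo);
   /* ... submit batch->bo and start a fresh one ... */
}

/* Mirrors intel_batchbuffer_free(): drop whatever is still pinned. */
static void toy_free(struct toy_batch *batch)
{
   toy_bo_put(batch->throttle_batch[1]);
   toy_bo_put(batch->throttle_batch[0]);
}

int main(void)
{
   struct toy_bo *bo = calloc(1, sizeof(*bo));
   assert(bo);
   bo->refcount = 1;

   struct toy_batch batch = { bo, { NULL, NULL } };
   toy_flush(&batch);                  /* throttle_batch[0] now holds a reference */
   assert(bo->refcount == 2);

   toy_free(&batch);                   /* releases the throttle reference */
   toy_bo_put(bo);                     /* releases the builder's own reference */
   return 0;
}

With both slots owned by brw_batch, the batchbuffer code releases everything it pinned, so the context teardown no longer needs to know about throttling at all.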