From: Marek Olšák <marek.ol...@amd.com>

for threaded gallium
---
 src/gallium/drivers/radeon/r600_perfcounter.c |  4 +-
 src/gallium/drivers/radeon/r600_query.c       | 56 +++++++++++++--------------
 src/gallium/drivers/radeon/r600_query.h       |  4 +-
 3 files changed, 32 insertions(+), 32 deletions(-)
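In short, the prepare_buffer hook and query buffer allocation now take the
screen rather than the context, so a hardware query can be initialized
without a context. A rough sketch of the new interface as used in the hunks
below (illustration only, not an additional hunk):

        bool r600_query_hw_init(struct r600_common_screen *rscreen,
                                struct r600_query_hw *query);

        /* Call sites that only have a context simply pass ctx->screen: */
        query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
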
diff --git a/src/gallium/drivers/radeon/r600_perfcounter.c b/src/gallium/drivers/radeon/r600_perfcounter.c
index 4671b20..0c2c6e9 100644
--- a/src/gallium/drivers/radeon/r600_perfcounter.c
+++ b/src/gallium/drivers/radeon/r600_perfcounter.c
@@ -108,21 +108,21 @@ static void r600_pc_query_destroy(struct r600_common_context *ctx,
                struct r600_pc_group *group = query->groups;
                query->groups = group->next;
                FREE(group);
        }

        FREE(query->counters);

        r600_query_hw_destroy(ctx, rquery);
 }

-static bool r600_pc_query_prepare_buffer(struct r600_common_context *ctx,
+static bool r600_pc_query_prepare_buffer(struct r600_common_screen *screen,
                                         struct r600_query_hw *hwquery,
                                         struct r600_resource *buffer)
 {
        /* no-op */
        return true;
 }

 static void r600_pc_query_emit_start(struct r600_common_context *ctx,
                                     struct r600_query_hw *hwquery,
                                     struct r600_resource *buffer, uint64_t va)
@@ -410,21 +410,21 @@ struct pipe_query *r600_create_batch_query(struct pipe_context *ctx,
                counter->base = group->result_base + j;
                counter->stride = group->num_counters;

                counter->qwords = 1;
                if ((block->flags & R600_PC_BLOCK_SE) && group->se < 0)
                        counter->qwords = screen->info.max_se;
                if (group->instance < 0)
                        counter->qwords *= block->num_instances;
        }

-       if (!r600_query_hw_init(rctx, &query->b))
+       if (!r600_query_hw_init(screen, &query->b))
                goto error;

        return (struct pipe_query *)query;

 error:
        r600_pc_query_destroy(rctx, &query->b.b);
        return NULL;
 }

 static bool r600_init_block_names(struct r600_common_screen *screen,
diff --git a/src/gallium/drivers/radeon/r600_query.c b/src/gallium/drivers/radeon/r600_query.c
index 2c3b5f4..d834261 100644
--- a/src/gallium/drivers/radeon/r600_query.c
+++ b/src/gallium/drivers/radeon/r600_query.c
@@ -393,22 +393,21 @@ static bool r600_query_sw_get_result(struct r600_common_context *rctx,
 static struct r600_query_ops sw_query_ops = {
        .destroy = r600_query_sw_destroy,
        .begin = r600_query_sw_begin,
        .end = r600_query_sw_end,
        .get_result = r600_query_sw_get_result,
        .get_result_resource = NULL
 };

-static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
-                                              unsigned query_type)
+static struct pipe_query *r600_query_sw_create(unsigned query_type)
 {
        struct r600_query_sw *query;

        query = CALLOC_STRUCT(r600_query_sw);
        if (!query)
                return NULL;

        query->b.type = query_type;
        query->b.ops = &sw_query_ops;
@@ -426,61 +425,61 @@ void r600_query_hw_destroy(struct r600_common_context *rctx,
                struct r600_query_buffer *qbuf = prev;
                prev = prev->previous;
                r600_resource_reference(&qbuf->buf, NULL);
                FREE(qbuf);
        }

        r600_resource_reference(&query->buffer.buf, NULL);
        FREE(rquery);
 }

-static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
+static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
                                                    struct r600_query_hw *query)
 {
        unsigned buf_size = MAX2(query->result_size,
-                                ctx->screen->info.min_alloc_size);
+                                rscreen->info.min_alloc_size);

        /* Queries are normally read by the CPU after
         * being written by the gpu, hence staging is probably a good
         * usage pattern.
         */
        struct r600_resource *buf = (struct r600_resource*)
-               pipe_buffer_create(ctx->b.screen, 0,
+               pipe_buffer_create(&rscreen->b, 0,
                                   PIPE_USAGE_STAGING, buf_size);
        if (!buf)
                return NULL;

-       if (!query->ops->prepare_buffer(ctx, query, buf)) {
+       if (!query->ops->prepare_buffer(rscreen, query, buf)) {
                r600_resource_reference(&buf, NULL);
                return NULL;
        }

        return buf;
 }

-static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
+static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
                                         struct r600_query_hw *query,
                                         struct r600_resource *buffer)
 {
        /* Callers ensure that the buffer is currently unused by the GPU. */
-       uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
-                                               PIPE_TRANSFER_WRITE |
-                                               PIPE_TRANSFER_UNSYNCHRONIZED);
+       uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
+                                                   PIPE_TRANSFER_WRITE |
+                                                   PIPE_TRANSFER_UNSYNCHRONIZED);
        if (!results)
                return false;

        memset(results, 0, buffer->b.b.width0);

        if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
            query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
-               unsigned max_rbs = ctx->screen->info.num_render_backends;
-               unsigned enabled_rb_mask = ctx->screen->info.enabled_rb_mask;
+               unsigned max_rbs = rscreen->info.num_render_backends;
+               unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
                unsigned num_results;
                unsigned i, j;

                /* Set top bits for unused backends. */
                num_results = buffer->b.b.width0 / query->result_size;
                for (j = 0; j < num_results; j++) {
                        for (i = 0; i < max_rbs; i++) {
                                if (!(enabled_rb_mask & (1<<i))) {
                                        results[(i * 4)+1] = 0x80000000;
                                        results[(i * 4)+3] = 0x80000000;
@@ -524,84 +523,84 @@ static void r600_query_hw_clear_result(struct r600_query_hw *,
                                       union pipe_query_result *);

 static struct r600_query_hw_ops query_hw_default_hw_ops = {
        .prepare_buffer = r600_query_hw_prepare_buffer,
        .emit_start = r600_query_hw_do_emit_start,
        .emit_stop = r600_query_hw_do_emit_stop,
        .clear_result = r600_query_hw_clear_result,
        .add_result = r600_query_hw_add_result,
 };

-bool r600_query_hw_init(struct r600_common_context *rctx,
+bool r600_query_hw_init(struct r600_common_screen *rscreen,
                        struct r600_query_hw *query)
 {
-       query->buffer.buf = r600_new_query_buffer(rctx, query);
+       query->buffer.buf = r600_new_query_buffer(rscreen, query);
        if (!query->buffer.buf)
                return false;

        return true;
 }

-static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
+static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
                                               unsigned query_type,
                                               unsigned index)
 {
        struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
        if (!query)
                return NULL;

        query->b.type = query_type;
        query->b.ops = &query_hw_ops;
        query->ops = &query_hw_default_hw_ops;

        switch (query_type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
-               query->result_size = 16 * rctx->screen->info.num_render_backends;
+               query->result_size = 16 * rscreen->info.num_render_backends;
                query->result_size += 16; /* for the fence + alignment */
                query->num_cs_dw_begin = 6;
-               query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
+               query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                query->result_size = 24;
                query->num_cs_dw_begin = 8;
-               query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
+               query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
                break;
        case PIPE_QUERY_TIMESTAMP:
                query->result_size = 16;
-               query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
+               query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
                query->flags = R600_QUERY_HW_FLAG_NO_START;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
                query->result_size = 32;
                query->num_cs_dw_begin = 6;
                query->num_cs_dw_end = 6;
                query->stream = index;
                break;
        case PIPE_QUERY_PIPELINE_STATISTICS:
                /* 11 values on EG, 8 on R600. */
-               query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
+               query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
                query->result_size += 8; /* for the fence + alignment */
                query->num_cs_dw_begin = 6;
-               query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
+               query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
                break;
        default:
                assert(0);
                FREE(query);
                return NULL;
        }

-       if (!r600_query_hw_init(rctx, query)) {
+       if (!r600_query_hw_init(rscreen, query)) {
                FREE(query);
                return NULL;
        }

        return (struct pipe_query *)query;
 }

 static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
                                              unsigned type, int diff)
 {
@@ -694,21 +693,21 @@ static void r600_query_hw_emit_start(struct r600_common_context *ctx,
        ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin +
                                        query->num_cs_dw_end, true);

        /* Get a new query buffer if needed. */
        if (query->buffer.results_end + query->result_size >
            query->buffer.buf->b.b.width0) {
                struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
                *qbuf = query->buffer;
                query->buffer.results_end = 0;
                query->buffer.previous = qbuf;
-               query->buffer.buf = r600_new_query_buffer(ctx, query);
+               query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
                if (!query->buffer.buf)
                        return;
        }

        /* emit begin query */
        va = query->buffer.buf->gpu_address + query->buffer.results_end;

        query->ops->emit_start(ctx, query, query->buffer.buf, va);

        ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
@@ -854,28 +853,29 @@ static void r600_emit_query_predication(struct r600_common_context *ctx,
                                results_base += query->result_size;

                                /* set CONTINUE bit for all packets except the first */
                                op |= PREDICATION_CONTINUE;
                        }
                }
        }
 }

 static struct pipe_query *r600_create_query(struct pipe_context *ctx,
                                            unsigned query_type, unsigned index)
 {
-       struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+       struct r600_common_screen *rscreen =
+               (struct r600_common_screen *)ctx->screen;

        if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
            query_type == PIPE_QUERY_GPU_FINISHED ||
            query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
-               return r600_query_sw_create(ctx, query_type);
+               return r600_query_sw_create(query_type);

-       return r600_query_hw_create(rctx, query_type, index);
+       return r600_query_hw_create(rscreen, query_type, index);
 }

 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
 {
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;

        rquery->ops->destroy(rctx, rquery);
 }
@@ -901,23 +901,23 @@ void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
                FREE(qbuf);
        }

        query->buffer.results_end = 0;
        query->buffer.previous = NULL;

        /* Obtain a new buffer if the current one can't be mapped without a stall. */
        if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf,
                                            RADEON_USAGE_READWRITE) ||
            !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
                r600_resource_reference(&query->buffer.buf, NULL);
-               query->buffer.buf = r600_new_query_buffer(rctx, query);
+               query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
        } else {
-               if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
+               if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
                        r600_resource_reference(&query->buffer.buf, NULL);
        }
 }

 bool r600_query_hw_begin(struct r600_common_context *rctx,
                         struct r600_query *rquery)
 {
        struct r600_query_hw *query = (struct r600_query_hw *)rquery;

        if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
diff --git a/src/gallium/drivers/radeon/r600_query.h b/src/gallium/drivers/radeon/r600_query.h
index 84b834c..052034a 100644
--- a/src/gallium/drivers/radeon/r600_query.h
+++ b/src/gallium/drivers/radeon/r600_query.h
@@ -135,21 +135,21 @@ struct r600_query {
 };

 enum {
        R600_QUERY_HW_FLAG_NO_START = (1 << 0),
        /* gap */
        /* whether begin_query doesn't clear the result */
        R600_QUERY_HW_FLAG_BEGIN_RESUMES = (1 << 2),
 };

 struct r600_query_hw_ops {
-       bool (*prepare_buffer)(struct r600_common_context *,
+       bool (*prepare_buffer)(struct r600_common_screen *,
                               struct r600_query_hw *,
                               struct r600_resource *);
        void (*emit_start)(struct r600_common_context *,
                           struct r600_query_hw *,
                           struct r600_resource *buffer, uint64_t va);
        void (*emit_stop)(struct r600_common_context *,
                          struct r600_query_hw *,
                          struct r600_resource *buffer, uint64_t va);
        void (*clear_result)(struct r600_query_hw *,
                             union pipe_query_result *);
        void (*add_result)(struct r600_common_context *ctx,
@@ -180,21 +180,21 @@ struct r600_query_hw {
        unsigned result_size;
        /* The number of dwords for begin_query or end_query. */
        unsigned num_cs_dw_begin;
        unsigned num_cs_dw_end;
        /* Linked list of queries */
        struct list_head list;
        /* For transform feedback: which stream the query is for */
        unsigned stream;
 };

-bool r600_query_hw_init(struct r600_common_context *rctx,
+bool r600_query_hw_init(struct r600_common_screen *rscreen,
                        struct r600_query_hw *query);
 void r600_query_hw_destroy(struct r600_common_context *rctx,
                           struct r600_query *rquery);
 bool r600_query_hw_begin(struct r600_common_context *rctx,
                         struct r600_query *rquery);
 bool r600_query_hw_end(struct r600_common_context *rctx,
                       struct r600_query *rquery);
 bool r600_query_hw_get_result(struct r600_common_context *rctx,
                              struct r600_query *rquery,
                              bool wait,
-- 
2.7.4

_______________________________________________
mesa-dev mailing list
mesa-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/mesa-dev