On Wed, Dec 2, 2015 at 12:33 AM, Kenneth Graunke <kenn...@whitecape.org> wrote:
> On Wednesday, November 25, 2015 09:00:09 PM Jason Ekstrand wrote:
>> ---
>> src/mesa/drivers/dri/i965/brw_fs.h | 3 +-
>> src/mesa/drivers/dri/i965/brw_fs_nir.cpp | 145
>> ++++++++++++++-----------------
>> src/mesa/drivers/dri/i965/brw_nir.c | 36 ++++++--
>> 3 files changed, 92 insertions(+), 92 deletions(-)
>>
>> diff --git a/src/mesa/drivers/dri/i965/brw_fs.h
>> b/src/mesa/drivers/dri/i965/brw_fs.h
>> index 2d408b2..b8891dd 100644
>> --- a/src/mesa/drivers/dri/i965/brw_fs.h
>> +++ b/src/mesa/drivers/dri/i965/brw_fs.h
>> @@ -296,8 +296,7 @@ public:
>> unsigned stream_id);
>> void emit_gs_thread_end();
>> void emit_gs_input_load(const fs_reg &dst, const nir_src &vertex_src,
>> - const fs_reg &indirect_offset, unsigned
>> imm_offset,
>> - unsigned num_components);
>> + const nir_src &offset_src, unsigned
>> num_components);
>> void emit_cs_terminate();
>> fs_reg *emit_cs_local_invocation_id_setup();
>> fs_reg *emit_cs_work_group_id_setup();
>> diff --git a/src/mesa/drivers/dri/i965/brw_fs_nir.cpp
>> b/src/mesa/drivers/dri/i965/brw_fs_nir.cpp
>> index c439da2..b2e6ee8 100644
>> --- a/src/mesa/drivers/dri/i965/brw_fs_nir.cpp
>> +++ b/src/mesa/drivers/dri/i965/brw_fs_nir.cpp
>> @@ -1601,25 +1601,26 @@ fs_visitor::emit_gs_vertex(const nir_src
>> &vertex_count_nir_src,
>> void
>> fs_visitor::emit_gs_input_load(const fs_reg &dst,
>> const nir_src &vertex_src,
>> - const fs_reg &indirect_offset,
>> - unsigned imm_offset,
>> + const nir_src &offset_src,
>> unsigned num_components)
>> {
>> struct brw_gs_prog_data *gs_prog_data = (struct brw_gs_prog_data *)
>> prog_data;
>>
>> + nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
>> + nir_const_value *offset_const = nir_src_as_const_value(offset_src);
>> + const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
>> +
>> /* Offset 0 is the VUE header, which contains VARYING_SLOT_LAYER [.y],
>> * VARYING_SLOT_VIEWPORT [.z], and VARYING_SLOT_PSIZ [.w]. Only
>> * gl_PointSize is available as a GS input, however, so it must be that.
>> */
>> const bool is_point_size =
>> - indirect_offset.file == BAD_FILE && imm_offset == 0;
>> -
>> - nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
>> - const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
>> + offset_const != NULL && offset_const->u[0] == 0;
>>
>> - if (indirect_offset.file == BAD_FILE && vertex_const != NULL &&
>> - 4 * imm_offset < push_reg_count) {
>> - imm_offset = 4 * imm_offset + vertex_const->u[0] * push_reg_count;
>> + if (offset_const != NULL && vertex_const != NULL &&
>> + 4 * offset_const->u[0] < push_reg_count) {
>
> Hmm, I thought you were planning to write a NIR helper that looked if an
> expression was (<const> + <non-const>) and return both pieces? As is,
> this will generate different code...I used to use the global offset for
> the constant part, and the per-slot offsets for only the non-constant
> part. I think that can save some ADD instructions.
I'll be sending out a V2 that puts the base_offset back in but in a well-defined and sane manner. Eric and Rob both want to keep the base+offset pair for opaque storage.

>> + int imm_offset = offset_const->u[0] * 4 +
>> + vertex_const->u[0] * push_reg_count;
>> /* This input was pushed into registers. */
>> if (is_point_size) {
>> /* gl_PointSize comes in .w */
>> @@ -1681,21 +1682,21 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst,
>> }
>>
>> fs_inst *inst;
>> - if (indirect_offset.file == BAD_FILE) {
>> + if (offset_const) {
>> /* Constant indexing - use global offset. */
>> inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
>> - inst->offset = imm_offset;
>> + inst->offset = offset_const->u[0];
>> inst->base_mrf = -1;
>> inst->mlen = 1;
>> inst->regs_written = num_components;
>> } else {
>> /* Indirect indexing - use per-slot offsets as well. */
>> - const fs_reg srcs[] = { icp_handle, indirect_offset };
>> + const fs_reg srcs[] = { icp_handle, get_nir_src(offset_src) };
>> fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
>> bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
>>
>> inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst,
>> payload);
>> - inst->offset = imm_offset;
>> + inst->offset = 0;
>> inst->base_mrf = -1;
>> inst->mlen = 2;
>> inst->regs_written = num_components;
>> @@ -1761,16 +1762,11 @@ fs_visitor::nir_emit_gs_intrinsic(const fs_builder
>> &bld,
>> retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
>> break;
>>
>> - case nir_intrinsic_load_input_indirect:
>> case nir_intrinsic_load_input:
>> unreachable("load_input intrinsics are invalid for the GS stage");
>>
>> - case nir_intrinsic_load_per_vertex_input_indirect:
>> - indirect_offset = retype(get_nir_src(instr->src[1]),
>> BRW_REGISTER_TYPE_D);
>> - /* fallthrough */
>> case nir_intrinsic_load_per_vertex_input:
>> - emit_gs_input_load(dest, instr->src[0],
>> - indirect_offset, instr->const_index[0],
>> + emit_gs_input_load(dest, instr->src[0], instr->src[1],
>> instr->num_components);
>> break;
>>
>> @@ -2104,8 +2100,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld,
>> nir_intrinsic_instr *instr
>> if (nir_intrinsic_infos[instr->intrinsic].has_dest)
>> dest = get_nir_dest(instr->dest);
>>
>> - bool has_indirect = false;
>> -
>> switch (instr->intrinsic) {
>> case nir_intrinsic_atomic_counter_inc:
>> case nir_intrinsic_atomic_counter_dec:
>> @@ -2294,27 +2288,23 @@ fs_visitor::nir_emit_intrinsic(const fs_builder
>> &bld, nir_intrinsic_instr *instr
>> bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1));
>> break;
>>
>> - case nir_intrinsic_load_uniform_indirect:
>> - has_indirect = true;
>> - /* fallthrough */
>> case nir_intrinsic_load_uniform: {
>> fs_reg uniform_reg(UNIFORM, instr->const_index[0]);
>> - uniform_reg.reg_offset = instr->const_index[1];
>> + uniform_reg.type = dest.type;
>>
>> - for (unsigned j = 0; j < instr->num_components; j++) {
>> - fs_reg src = offset(retype(uniform_reg, dest.type), bld, j);
>> - if (has_indirect)
>> - src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
>> + nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
>> + if (const_offset) {
>> + uniform_reg.reg_offset = const_offset->u[0];
>> + } else {
>> + uniform_reg.reladdr = new(mem_ctx)
>> fs_reg(get_nir_src(instr->src[0]));
>> + }
>>
>> - bld.MOV(dest, src);
>> - dest = offset(dest, bld, 1);
>> + for (unsigned j = 0; j < instr->num_components; j++) {
>> + bld.MOV(offset(dest, bld, j), offset(uniform_reg, bld, j));
>> }
>> break;
>> }
>>
>> - case nir_intrinsic_load_ubo_indirect:
>> - has_indirect = true;
>> - /* fallthrough */
>> case nir_intrinsic_load_ubo: {
>> nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
>> fs_reg surf_index;
>> @@ -2342,27 +2332,28 @@ fs_visitor::nir_emit_intrinsic(const fs_builder
>> &bld, nir_intrinsic_instr *instr
>> nir->info.num_ubos - 1);
>> }
>>
>> - if (has_indirect) {
>> + nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
>> +
>> + if (const_offset == NULL) {
>> /* Turn the byte offset into a dword offset. */
>> fs_reg base_offset = vgrf(glsl_type::int_type);
>> bld.SHR(base_offset, retype(get_nir_src(instr->src[1]),
>> BRW_REGISTER_TYPE_D),
>> brw_imm_d(2));
>>
>> - unsigned vec4_offset = instr->const_index[0] / 4;
>> for (int i = 0; i < instr->num_components; i++)
>> VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i),
>> surf_index,
>> - base_offset, vec4_offset + i);
>> + base_offset, i);
>
> Shouldn't converting UBOs from vec4 offsets to byte offsets be done
> separately from removing the _indirect intrinsics? We're already
> squashing a lot together, and that seems like an actual separable
> change.

I did not conflate the changes, merely got rid of vec4_offset.

>> } else {
>> fs_reg packed_consts = vgrf(glsl_type::float_type);
>> packed_consts.type = dest.type;
>>
>> - struct brw_reg const_offset_reg = brw_imm_ud(instr->const_index[0]
>> & ~15);
>> + struct brw_reg const_offset_reg = brw_imm_ud(const_offset->u[0] &
>> ~15);
>> bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
>> surf_index, const_offset_reg);
>>
>> for (unsigned i = 0; i < instr->num_components; i++) {
>> - packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);
>> + packed_consts.set_smear(const_offset->u[0] % 16 / 4 + i);
>>
>> /* The std140 packing rules don't allow vectors to cross 16-byte
>> * boundaries, and a reg is 32 bytes.
>> @@ -2376,9 +2367,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld,
>> nir_intrinsic_instr *instr
>> break;
>> }
>>
>> - case nir_intrinsic_load_ssbo_indirect:
>> - has_indirect = true;
>> - /* fallthrough */
>> case nir_intrinsic_load_ssbo: {
>> assert(devinfo->gen >= 7);
>>
>> @@ -2404,12 +2392,12 @@ fs_visitor::nir_emit_intrinsic(const fs_builder
>> &bld, nir_intrinsic_instr *instr
>> nir->info.num_ssbos - 1);
>> }
>>
>> - /* Get the offset to read from */
>> fs_reg offset_reg;
>> - if (has_indirect) {
>> - offset_reg = get_nir_src(instr->src[1]);
>> + nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
>> + if (const_offset) {
>> + offset_reg = brw_imm_ud(const_offset->u[0]);
>> } else {
>> - offset_reg = brw_imm_ud(instr->const_index[0]);
>> + offset_reg = get_nir_src(instr->src[1]);
>> }
>
> This pattern comes up quite a bit - I actually have a patch in my tess
> branch that adds a get_nir_src_imm() helper so you can do this in one
> line. I just mailed it out, in case you're interested.

Yeah, I should grab that and pull it in.
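
Roughly, I'd expect such a helper to boil down to something like the
following (just a sketch of the pattern above; the actual patch on the
list may be shaped differently):

   /* Return the NIR source as an immediate if it is a compile-time
    * constant, otherwise fall back to the register from get_nir_src().
    * A real version would also need a declaration in brw_fs.h.
    */
   fs_reg
   fs_visitor::get_nir_src_imm(const nir_src &src)
   {
      nir_const_value *const_val = nir_src_as_const_value(src);
      return const_val ? fs_reg(brw_imm_ud(const_val->u[0])) : get_nir_src(src);
   }

With that, each of the if/else blocks above collapses into a single
assignment, e.g. offset_reg = get_nir_src_imm(instr->src[1]).
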
>>
>> /* Read the vector */
>> @@ -2424,32 +2412,27 @@ fs_visitor::nir_emit_intrinsic(const fs_builder
>> &bld, nir_intrinsic_instr *instr
>> break;
>> }
>>
>> - case nir_intrinsic_load_input_indirect:
>> - has_indirect = true;
>> - /* fallthrough */
>> case nir_intrinsic_load_input: {
>> - unsigned index = 0;
>> - for (unsigned j = 0; j < instr->num_components; j++) {
>> - fs_reg src;
>> - if (stage == MESA_SHADER_VERTEX) {
>> - src = offset(fs_reg(ATTR, instr->const_index[0], dest.type),
>> bld, index);
>> - } else {
>> - src = offset(retype(nir_inputs, dest.type), bld,
>> - instr->const_index[0] + index);
>> - }
>> - if (has_indirect)
>> - src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
>> - index++;
>> + fs_reg src;
>> + if (stage == MESA_SHADER_VERTEX) {
>> + src = fs_reg(ATTR, 0, dest.type);
>> + } else {
>> + src = retype(nir_inputs, dest.type);
>> + }
>>
>> - bld.MOV(dest, src);
>> - dest = offset(dest, bld, 1);
>> + nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
>> + if (const_offset) {
>> + src = offset(src, bld, const_offset->u[0]);
>> + } else {
>> + src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
>> + }
>> +
>> + for (unsigned j = 0; j < instr->num_components; j++) {
>> + bld.MOV(offset(dest, bld, j), offset(src, bld, j));
>> }
>> break;
>> }
>>
>> - case nir_intrinsic_store_ssbo_indirect:
>> - has_indirect = true;
>> - /* fallthrough */
>> case nir_intrinsic_store_ssbo: {
>> assert(devinfo->gen >= 7);
>>
>> @@ -2476,7 +2459,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld,
>> nir_intrinsic_instr *instr
>> fs_reg val_reg = get_nir_src(instr->src[0]);
>>
>> /* Writemask */
>> - unsigned writemask = instr->const_index[1];
>> + unsigned writemask = instr->const_index[0];
>>
>> /* Combine groups of consecutive enabled channels in one write
>> * message. We use ffs to find the first enabled channel and then ffs
>> on
>> @@ -2486,10 +2469,11 @@ fs_visitor::nir_emit_intrinsic(const fs_builder
>> &bld, nir_intrinsic_instr *instr
>> while (writemask) {
>> unsigned first_component = ffs(writemask) - 1;
>> unsigned length = ffs(~(writemask >> first_component)) - 1;
>> - fs_reg offset_reg;
>>
>> - if (!has_indirect) {
>> - offset_reg = brw_imm_ud(instr->const_index[0] + 4 *
>> first_component);
>> + fs_reg offset_reg;
>> + nir_const_value *const_offset =
>> nir_src_as_const_value(instr->src[2]);
>> + if (const_offset) {
>> + offset_reg = brw_imm_ud(const_offset->u[0] + 4 *
>> first_component);
>> } else {
>> offset_reg = vgrf(glsl_type::uint_type);
>> bld.ADD(offset_reg,
>> @@ -2510,20 +2494,19 @@ fs_visitor::nir_emit_intrinsic(const fs_builder
>> &bld, nir_intrinsic_instr *instr
>> break;
>> }
>>
>> - case nir_intrinsic_store_output_indirect:
>> - has_indirect = true;
>> - /* fallthrough */
>> case nir_intrinsic_store_output: {
>> fs_reg src = get_nir_src(instr->src[0]);
>> - unsigned index = 0;
>> + fs_reg new_dest = retype(nir_outputs, src.type);
>> +
>> + nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
>> + if (const_offset) {
>> + new_dest = offset(new_dest, bld, const_offset->u[0]);
>> + } else {
>> + src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
>> + }
>> +
>> for (unsigned j = 0; j < instr->num_components; j++) {
>> - fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
>> - instr->const_index[0] + index);
>> - if (has_indirect)
>> - src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
>> - index++;
>> - bld.MOV(new_dest, src);
>> - src = offset(src, bld, 1);
>> + bld.MOV(offset(new_dest, bld, j), offset(src, bld, j));
>> }
>> break;
>> }
>> diff --git a/src/mesa/drivers/dri/i965/brw_nir.c
>> b/src/mesa/drivers/dri/i965/brw_nir.c
>> index bf3bc2d..85b4acc 100644
>> --- a/src/mesa/drivers/dri/i965/brw_nir.c
>> +++ b/src/mesa/drivers/dri/i965/brw_nir.c
>> @@ -25,21 +25,26 @@
>> #include "brw_shader.h"
>> #include "glsl/glsl_parser_extras.h"
>> #include "glsl/nir/glsl_to_nir.h"
>> +#include "glsl/nir/nir_builder.h"
>> #include "program/prog_to_nir.h"
>>
>> +struct remap_vs_attrs_state {
>> + nir_builder b;
>> + GLbitfield64 inputs_read;
>> +};
>> +
>> static bool
>> remap_vs_attrs(nir_block *block, void *closure)
>> {
>> - GLbitfield64 inputs_read = *((GLbitfield64 *) closure);
>> + struct remap_vs_attrs_state *state = closure;
>>
>> - nir_foreach_instr(block, instr) {
>> + nir_foreach_instr_safe(block, instr) {
>> if (instr->type != nir_instr_type_intrinsic)
>> continue;
>>
>> nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
>>
>> - /* We set EmitNoIndirect for VS inputs, so there are no indirects. */
>> - assert(intrin->intrinsic != nir_intrinsic_load_input_indirect);
>> + state->b.cursor = nir_before_instr(&intrin->instr);
>>
>> if (intrin->intrinsic == nir_intrinsic_load_input) {
>> /* Attributes come in a contiguous block, ordered by their
>> @@ -47,9 +52,15 @@ remap_vs_attrs(nir_block *block, void *closure)
>> * number for an attribute by masking out the enabled attributes
>> * before it and counting the bits.
>> */
>> - int attr = intrin->const_index[0];
>> - int slot = _mesa_bitcount_64(inputs_read & BITFIELD64_MASK(attr));
>> - intrin->const_index[0] = 4 * slot;
>> + nir_const_value *const_attr =
>> nir_src_as_const_value(intrin->src[0]);
>> +
>> + /* We set EmitNoIndirect for VS inputs, so there are no indirects.
>> */
>> + assert(const_attr != NULL);
>> +
>> + int attr = const_attr->u[0];
>> + int slot = _mesa_bitcount_64(state->inputs_read &
>> BITFIELD64_MASK(attr));
>> + nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
>> + nir_src_for_ssa(nir_imm_int(&state->b, 4 *
>> slot)));
>> }
>> }
>> return true;
>> @@ -80,10 +91,17 @@ brw_nir_lower_inputs(nir_shader *nir,
>> * key->inputs_read since the two are identical aside from Gen4-5
>> * edge flag differences.
>> */
>> - GLbitfield64 inputs_read = nir->info.inputs_read;
>> +
>> + /* This pass needs actual constants */
>> + nir_opt_constant_folding(nir);
>
> Is this sufficient? We don't need copy prop to eliminate imovs/fmovs
> as well?

It should be. Constant folding can fold movs just fine.

>> +
>> + struct remap_vs_attrs_state state = {
>> + .inputs_read = nir->info.inputs_read
>> + };
>> nir_foreach_overload(nir, overload) {
>> if (overload->impl) {
>> - nir_foreach_block(overload->impl, remap_vs_attrs,
>> &inputs_read);
>> + nir_builder_init(&state.b, overload->impl);
>> + nir_foreach_block(overload->impl, remap_vs_attrs, &state);
>> }
>> }
>> }
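
Coming back to the earlier question about a helper that recognizes a
(<const> + <non-const>) offset expression: at the NIR level the check
would look roughly like the sketch below. This is only meant to
illustrate the idea (it assumes a scalar iadd in SSA form with no
source modifiers); the V2 series keeps the base+offset pair on the
intrinsic instead, as mentioned above.

   /* Sketch: if 'src' is 'iadd x, #c' (or 'iadd #c, x'), return the
    * constant addend and report which operand holds the non-constant
    * part via *other_op.  Returns 0 when no constant part is found
    * (a constant 0 addend is treated the same way, which is harmless).
    * Not actual driver code.
    */
   static unsigned
   find_constant_offset_part(const nir_src *src, unsigned *other_op)
   {
      if (!src->is_ssa || src->ssa->parent_instr->type != nir_instr_type_alu)
         return 0;

      nir_alu_instr *add = nir_instr_as_alu(src->ssa->parent_instr);
      if (add->op != nir_op_iadd || !add->dest.dest.is_ssa ||
          add->dest.dest.ssa.num_components != 1)
         return 0;

      for (unsigned i = 0; i < 2; i++) {
         if (add->src[i].abs || add->src[i].negate)
            continue;

         nir_const_value *c = nir_src_as_const_value(add->src[i].src);
         if (c) {
            *other_op = 1 - i;
            return c->u[add->src[i].swizzle[0]];
         }
      }

      return 0;
   }

The caller would then fold the returned constant into the message's
global offset and emit only the remaining operand as the per-slot
offset, which is where the saved ADDs would come from.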