Signed-off-by: Karol Herbst <kher...@redhat.com>
---
 src/compiler/spirv/spirv_to_nir.c |   5 +-
 src/compiler/spirv/vtn_alu.c      | 187 +++++++++++++++++-------------
 src/compiler/spirv/vtn_private.h  |   3 +
 3 files changed, 115 insertions(+), 80 deletions(-)

diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 8c341e9c1fa..cbd40df7473 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -4067,7 +4067,6 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
    case SpvOpConvertUToPtr:
    case SpvOpPtrCastToGeneric:
    case SpvOpGenericCastToPtr:
-   case SpvOpBitcast:
    case SpvOpIsNan:
    case SpvOpIsInf:
    case SpvOpIsFinite:
@@ -4152,6 +4151,10 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
       vtn_handle_alu(b, opcode, w, count);
       break;
 
+   case SpvOpBitcast:
+      vtn_handle_bitcast(b, opcode, w, count);
+      break;
+
    case SpvOpVectorExtractDynamic:
    case SpvOpVectorInsertDynamic:
    case SpvOpVectorShuffle:
diff --git a/src/compiler/spirv/vtn_alu.c b/src/compiler/spirv/vtn_alu.c
index 32825da29cb..e1088a7e9db 100644
--- a/src/compiler/spirv/vtn_alu.c
+++ b/src/compiler/spirv/vtn_alu.c
@@ -211,81 +211,6 @@ vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode,
    }
 }
 
-static void
-vtn_handle_bitcast(struct vtn_builder *b, struct vtn_ssa_value *dest,
-                   struct nir_ssa_def *src)
-{
-   if (glsl_get_vector_elements(dest->type) == src->num_components) {
-      /* From the definition of OpBitcast in the SPIR-V 1.2 spec:
-       *
-       * "If Result Type has the same number of components as Operand, they
-       * must also have the same component width, and results are computed per
-       * component."
-       */
-      dest->def = nir_imov(&b->nb, src);
-      return;
-   }
-
-   /* From the definition of OpBitcast in the SPIR-V 1.2 spec:
-    *
-    * "If Result Type has a different number of components than Operand, the
-    * total number of bits in Result Type must equal the total number of bits
-    * in Operand. Let L be the type, either Result Type or Operand’s type, that
-    * has the larger number of components. Let S be the other type, with the
-    * smaller number of components. The number of components in L must be an
-    * integer multiple of the number of components in S. The first component
-    * (that is, the only or lowest-numbered component) of S maps to the first
-    * components of L, and so on, up to the last component of S mapping to the
-    * last components of L. Within this mapping, any single component of S
-    * (mapping to multiple components of L) maps its lower-ordered bits to the
-    * lower-numbered components of L."
-    */
-   unsigned src_bit_size = src->bit_size;
-   unsigned dest_bit_size = glsl_get_bit_size(dest->type);
-   unsigned src_components = src->num_components;
-   unsigned dest_components = glsl_get_vector_elements(dest->type);
-   vtn_assert(src_bit_size * src_components == dest_bit_size * dest_components);
-
-   nir_ssa_def *dest_chan[NIR_MAX_VEC_COMPONENTS];
-   if (src_bit_size > dest_bit_size) {
-      vtn_assert(src_bit_size % dest_bit_size == 0);
-      unsigned divisor = src_bit_size / dest_bit_size;
-      for (unsigned comp = 0; comp < src_components; comp++) {
-         nir_ssa_def *split;
-         if (src_bit_size == 64) {
-            assert(dest_bit_size == 32 || dest_bit_size == 16);
-            split = dest_bit_size == 32 ?
-               nir_unpack_64_2x32(&b->nb, nir_channel(&b->nb, src, comp)) :
-               nir_unpack_64_4x16(&b->nb, nir_channel(&b->nb, src, comp));
-         } else {
-            vtn_assert(src_bit_size == 32);
-            vtn_assert(dest_bit_size == 16);
-            split = nir_unpack_32_2x16(&b->nb, nir_channel(&b->nb, src, comp));
-         }
-         for (unsigned i = 0; i < divisor; i++)
-            dest_chan[divisor * comp + i] = nir_channel(&b->nb, split, i);
-      }
-   } else {
-      vtn_assert(dest_bit_size % src_bit_size == 0);
-      unsigned divisor = dest_bit_size / src_bit_size;
-      for (unsigned comp = 0; comp < dest_components; comp++) {
-         unsigned channels = ((1 << divisor) - 1) << (comp * divisor);
-         nir_ssa_def *src_chan = nir_channels(&b->nb, src, channels);
-         if (dest_bit_size == 64) {
-            assert(src_bit_size == 32 || src_bit_size == 16);
-            dest_chan[comp] = src_bit_size == 32 ?
-               nir_pack_64_2x32(&b->nb, src_chan) :
-               nir_pack_64_4x16(&b->nb, src_chan);
-         } else {
-            vtn_assert(dest_bit_size == 32);
-            vtn_assert(src_bit_size == 16);
-            dest_chan[comp] = nir_pack_32_2x16(&b->nb, src_chan);
-         }
-      }
-   }
-   dest->def = nir_vec(&b->nb, dest_chan, dest_components);
-}
-
 nir_op
 vtn_nir_alu_op_for_spirv_opcode(struct vtn_builder *b,
                                 SpvOp opcode, bool *swap,
@@ -451,6 +376,114 @@ handle_rounding_mode(struct vtn_builder *b, struct vtn_value *val, int member,
    }
 }
 
+static nir_ssa_def*
+_vtn_handle_bitcast(struct vtn_builder *b, const struct glsl_type *dest_type,
+                    struct nir_ssa_def *src)
+{
+   if (glsl_get_vector_elements(dest_type) == src->num_components) {
+      /* From the definition of OpBitcast in the SPIR-V 1.2 spec:
+       *
+       * "If Result Type has the same number of components as Operand, they
+       * must also have the same component width, and results are computed per
+       * component."
+       */
+      return nir_imov(&b->nb, src);
+   }
+
+   /* From the definition of OpBitcast in the SPIR-V 1.2 spec:
+    *
+    * "If Result Type has a different number of components than Operand, the
+    * total number of bits in Result Type must equal the total number of bits
+    * in Operand. Let L be the type, either Result Type or Operand’s type, that
+    * has the larger number of components. Let S be the other type, with the
+    * smaller number of components. The number of components in L must be an
+    * integer multiple of the number of components in S. The first component
+    * (that is, the only or lowest-numbered component) of S maps to the first
+    * components of L, and so on, up to the last component of S mapping to the
+    * last components of L. Within this mapping, any single component of S
+    * (mapping to multiple components of L) maps its lower-ordered bits to the
+    * lower-numbered components of L."
+    */
+   unsigned src_bit_size = src->bit_size;
+   unsigned dest_bit_size = glsl_get_bit_size(dest_type);
+   unsigned src_components = src->num_components;
+   unsigned dest_components = glsl_get_vector_elements(dest_type);
+   vtn_assert(src_bit_size * src_components == dest_bit_size * dest_components);
+
+   nir_ssa_def *dest_chan[NIR_MAX_VEC_COMPONENTS];
+   if (src_bit_size > dest_bit_size) {
+      vtn_assert(src_bit_size % dest_bit_size == 0);
+      unsigned divisor = src_bit_size / dest_bit_size;
+      for (unsigned comp = 0; comp < src_components; comp++) {
+         nir_ssa_def *split;
+         if (src_bit_size == 64) {
+            assert(dest_bit_size == 32 || dest_bit_size == 16);
+            split = dest_bit_size == 32 ?
+               nir_unpack_64_2x32(&b->nb, nir_channel(&b->nb, src, comp)) :
+               nir_unpack_64_4x16(&b->nb, nir_channel(&b->nb, src, comp));
+         } else {
+            vtn_assert(src_bit_size == 32);
+            vtn_assert(dest_bit_size == 16);
+            split = nir_unpack_32_2x16(&b->nb, nir_channel(&b->nb, src, comp));
+         }
+         for (unsigned i = 0; i < divisor; i++)
+            dest_chan[divisor * comp + i] = nir_channel(&b->nb, split, i);
+      }
+   } else {
+      vtn_assert(dest_bit_size % src_bit_size == 0);
+      unsigned divisor = dest_bit_size / src_bit_size;
+      for (unsigned comp = 0; comp < dest_components; comp++) {
+         unsigned channels = ((1 << divisor) - 1) << (comp * divisor);
+         nir_ssa_def *src_chan = nir_channels(&b->nb, src, channels);
+         if (dest_bit_size == 64) {
+            assert(src_bit_size == 32 || src_bit_size == 16);
+            dest_chan[comp] = src_bit_size == 32 ?
+               nir_pack_64_2x32(&b->nb, src_chan) :
+               nir_pack_64_4x16(&b->nb, src_chan);
+         } else {
+            vtn_assert(dest_bit_size == 32);
+            vtn_assert(src_bit_size == 16);
+            dest_chan[comp] = nir_pack_32_2x16(&b->nb, src_chan);
+         }
+      }
+   }
+   return nir_vec(&b->nb, dest_chan, dest_components);
+}
+
+void
+vtn_handle_bitcast(struct vtn_builder *b, SpvOp opcode,
+                   const uint32_t *w, unsigned count)
+{
+   assert(opcode == SpvOpBitcast);
+
+   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+   struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
+
+   struct nir_ssa_def *src;
+   struct vtn_value *val;
+   if (src_val->value_type == vtn_value_type_pointer) {
+      src = vtn_pointer_to_ssa(b, src_val->pointer);
+   } else {
+      vtn_assert(glsl_type_is_vector_or_scalar(src_val->ssa->type));
+      src = src_val->ssa->def;
+   }
+
+   struct nir_ssa_def *res = _vtn_handle_bitcast(b, type->type, src);
+
+   if (type->base_type == vtn_base_type_pointer) {
+      val = vtn_push_value(b, w[2], vtn_value_type_pointer);
+      val->pointer = vtn_pointer_from_ssa(b, res, type);
+      val->pointer->offset = res;
+   } else {
+      val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+      val->ssa = vtn_create_ssa_value(b, type->type);
+      val->ssa->def = res;
+   }
+   vtn_foreach_decoration(b, val, handle_no_contraction, NULL);
+
+   b->nb.exact = false;
+}
+
 void
 vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
@@ -664,10 +697,6 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
       break;
    }
 
-   case SpvOpBitcast:
-      vtn_handle_bitcast(b, val->ssa, src[0]);
-      break;
-
    case SpvOpFConvert: {
       nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
       nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(type);
diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
index 8ccebaabef9..996f0803941 100644
--- a/src/compiler/spirv/vtn_private.h
+++ b/src/compiler/spirv/vtn_private.h
@@ -750,6 +750,9 @@ nir_op vtn_nir_alu_op_for_spirv_opcode(struct vtn_builder *b,
 void vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count);
 
+void vtn_handle_bitcast(struct vtn_builder *b, SpvOp opcode,
+                        const uint32_t *w, unsigned count);
+
 void vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
                          const uint32_t *w, unsigned count);
 
-- 
2.19.1
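For readers who don't have the NIR pack/unpack opcodes in their head, here is a minimal
standalone C sketch (my own illustration, not part of the patch; the helper name is made
up) of the component mapping _vtn_handle_bitcast() builds with nir_unpack_64_2x32():
splitting one 64-bit source component into two 32-bit destination components puts the
lower-ordered bits in the lower-numbered component, as the SPIR-V 1.2 wording quoted in
the code requires.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical CPU-side equivalent of bitcasting one 64-bit component into a
 * 2x32-bit vector (the mapping nir_unpack_64_2x32 produces).  Pure shifts and
 * masks, so the result does not depend on host endianness. */
static void
bitcast_u64_to_2x32(uint64_t src, uint32_t dest[2])
{
   dest[0] = (uint32_t)(src & 0xffffffffu); /* low 32 bits  -> component 0 */
   dest[1] = (uint32_t)(src >> 32);         /* high 32 bits -> component 1 */
}

int
main(void)
{
   uint32_t chan[2];
   bitcast_u64_to_2x32(0x1122334455667788ull, chan);
   assert(chan[0] == 0x55667788u);
   assert(chan[1] == 0x11223344u);
   printf("0x1122334455667788 -> [0x%08x, 0x%08x]\n", chan[0], chan[1]);
   return 0;
}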