---
 src/amd/common/ac_nir_to_llvm.c | 360 ++++++++++++++++++++--------------------
 1 file changed, 179 insertions(+), 181 deletions(-)
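[Note for reviewers, not part of the patch: the change below is mechanical. The
i32 type is dropped from the radv-specific struct nir_to_llvm_context, and every
call site switches to the copy already held by the embedded shared context,
ctx->ac. A minimal sketch of the pattern follows; the declarations are
illustrative only and the helper const_four() is made up for the example, not a
function in the tree:

    /* Sketch: the shared low-level context owns the common LLVM types once,
     * instead of each per-shader context re-creating them. */
    #include <stdbool.h>
    #include <llvm-c/Core.h>

    struct ac_llvm_context {
        LLVMContextRef context;
        LLVMTypeRef i32;            /* created once at shared-context init */
    };

    struct nir_to_llvm_context {
        struct ac_llvm_context ac;  /* shared part, reachable as ctx->ac */
        /* LLVMTypeRef i32;  <- removed by this patch; use ctx->ac.i32 */
    };

    /* Call sites then change mechanically: */
    static LLVMValueRef const_four(struct nir_to_llvm_context *ctx)
    {
        return LLVMConstInt(ctx->ac.i32, 4, false);  /* was: ctx->i32 */
    }

The same swap (ctx->i32 -> ctx->ac.i32) repeats throughout; the vector types
(v2i32, v4i32, ...) stay in nir_to_llvm_context for now and are simply built
from ctx->ac.i32 in setup_types().]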
diff --git a/src/amd/common/ac_nir_to_llvm.c b/src/amd/common/ac_nir_to_llvm.c
index 2ec30517e0..d792042925 100644
--- a/src/amd/common/ac_nir_to_llvm.c
+++ b/src/amd/common/ac_nir_to_llvm.c
@@ -130,21 +130,20 @@ struct nir_to_llvm_context {
     LLVMValueRef hs_ring_tess_factor;
 
     LLVMValueRef prim_mask;
     LLVMValueRef sample_pos_offset;
     LLVMValueRef persp_sample, persp_center, persp_centroid;
     LLVMValueRef linear_sample, linear_center, linear_centroid;
 
     LLVMTypeRef i1;
     LLVMTypeRef i8;
     LLVMTypeRef i16;
-    LLVMTypeRef i32;
     LLVMTypeRef i64;
     LLVMTypeRef v2i32;
     LLVMTypeRef v3i32;
     LLVMTypeRef v4i32;
     LLVMTypeRef v8i32;
     LLVMTypeRef f64;
     LLVMTypeRef f32;
     LLVMTypeRef f16;
     LLVMTypeRef v2f32;
     LLVMTypeRef v4f32;
@@ -461,29 +460,29 @@ static LLVMValueRef get_tcs_out_patch_stride(struct nir_to_llvm_context *ctx)
 {
     return unpack_param(&ctx->ac, ctx->tcs_out_layout, 0, 13);
 }
 
 static LLVMValueRef get_tcs_out_patch0_offset(struct nir_to_llvm_context *ctx)
 {
     return LLVMBuildMul(ctx->builder,
                 unpack_param(&ctx->ac, ctx->tcs_out_offsets, 0, 16),
-                LLVMConstInt(ctx->i32, 4, false), "");
+                LLVMConstInt(ctx->ac.i32, 4, false), "");
 }
 
 static LLVMValueRef get_tcs_out_patch0_patch_data_offset(struct nir_to_llvm_context *ctx)
 {
     return LLVMBuildMul(ctx->builder,
                 unpack_param(&ctx->ac, ctx->tcs_out_offsets, 16, 16),
-                LLVMConstInt(ctx->i32, 4, false), "");
+                LLVMConstInt(ctx->ac.i32, 4, false), "");
 }
 
 static LLVMValueRef get_tcs_in_current_patch_offset(struct nir_to_llvm_context *ctx)
 {
     LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
     LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
     return LLVMBuildMul(ctx->builder, patch_stride, rel_patch_id, "");
 }
@@ -663,21 +662,21 @@ radv_define_common_user_sgprs_phase2(struct nir_to_llvm_context *ctx,
             } else
                 ctx->descriptor_sets[i] = NULL;
         }
     } else {
         uint32_t desc_sgpr_idx = *user_sgpr_idx;
         set_userdata_location_shader(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS, user_sgpr_idx, 2);
         for (unsigned i = 0; i < num_sets; ++i) {
             if (ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
                 set_userdata_location_indirect(&ctx->shader_info->user_sgprs_locs.descriptor_sets[i], desc_sgpr_idx, 2, i * 8);
-                ctx->descriptor_sets[i] = ac_build_load_to_sgpr(&ctx->ac, desc_sets, LLVMConstInt(ctx->i32, i, false));
+                ctx->descriptor_sets[i] = ac_build_load_to_sgpr(&ctx->ac, desc_sets, LLVMConstInt(ctx->ac.i32, i, false));
             } else
                 ctx->descriptor_sets[i] = NULL;
         }
 
         ctx->shader_info->need_indirect_descriptor_sets = true;
     }
 
     if (ctx->shader_info->info.needs_push_constants) {
         set_userdata_location_shader(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx, 2);
     }
@@ -686,24 +685,24 @@
 static void
 radv_define_vs_user_sgprs_phase1(struct nir_to_llvm_context *ctx, gl_shader_stage stage, bool has_previous_stage, gl_shader_stage previous_stage, struct arg_info *args)
 {
     if (!ctx->is_gs_copy_shader && (stage == MESA_SHADER_VERTEX || (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
         if (ctx->shader_info->info.vs.has_vertex_buffers)
             add_user_sgpr_argument(args, const_array(ctx->v4i32, 16), &ctx->vertex_buffers); /* vertex buffers */
-        add_user_sgpr_argument(args, ctx->i32, &ctx->abi.base_vertex); // base vertex
-        add_user_sgpr_argument(args, ctx->i32, &ctx->abi.start_instance);// start instance
+        add_user_sgpr_argument(args, ctx->ac.i32, &ctx->abi.base_vertex); // base vertex
+        add_user_sgpr_argument(args, ctx->ac.i32, &ctx->abi.start_instance);// start instance
         if (ctx->shader_info->info.vs.needs_draw_id)
-            add_user_sgpr_argument(args, ctx->i32, &ctx->abi.draw_id); // draw id
+            add_user_sgpr_argument(args, ctx->ac.i32, &ctx->abi.draw_id); // draw id
     }
 }
 
 static void
 radv_define_vs_user_sgprs_phase2(struct nir_to_llvm_context *ctx, gl_shader_stage stage, bool has_previous_stage, gl_shader_stage previous_stage, uint8_t *user_sgpr_idx)
 {
@@ -733,179 +732,179 @@ static void create_function(struct nir_to_llvm_context *ctx,
     allocate_user_sgprs(ctx, &user_sgpr_info);
 
     if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
         add_user_sgpr_argument(&args, const_array(ctx->v4i32, 16), &ctx->ring_offsets); /* address of rings */
     }
 
     switch (stage) {
     case MESA_SHADER_COMPUTE:
         radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
         if (ctx->shader_info->info.cs.grid_components_used)
-            add_user_sgpr_argument(&args, LLVMVectorType(ctx->i32, ctx->shader_info->info.cs.grid_components_used), &ctx->num_work_groups); /* grid size */
-        add_sgpr_argument(&args, LLVMVectorType(ctx->i32, 3), &ctx->workgroup_ids);
-        add_sgpr_argument(&args, ctx->i32, &ctx->tg_size);
-        add_vgpr_argument(&args, LLVMVectorType(ctx->i32, 3), &ctx->local_invocation_ids);
+            add_user_sgpr_argument(&args, LLVMVectorType(ctx->ac.i32, ctx->shader_info->info.cs.grid_components_used), &ctx->num_work_groups); /* grid size */
+        add_sgpr_argument(&args, LLVMVectorType(ctx->ac.i32, 3), &ctx->workgroup_ids);
+        add_sgpr_argument(&args, ctx->ac.i32, &ctx->tg_size);
+        add_vgpr_argument(&args, LLVMVectorType(ctx->ac.i32, 3), &ctx->local_invocation_ids);
         break;
     case MESA_SHADER_VERTEX:
         radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
         radv_define_vs_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &args);
         if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->view_index);
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
         if (ctx->options->key.vs.as_es)
-            add_sgpr_argument(&args, ctx->i32, &ctx->es2gs_offset); // es2gs offset
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->es2gs_offset); // es2gs offset
         else if (ctx->options->key.vs.as_ls)
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->ls_out_layout); // ls out layout
-        add_vgpr_argument(&args, ctx->i32, &ctx->abi.vertex_id); // vertex id
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->ls_out_layout); // ls out layout
+        add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.vertex_id); // vertex id
         if (!ctx->is_gs_copy_shader) {
-            add_vgpr_argument(&args, ctx->i32, &ctx->rel_auto_id); // rel auto id
-            add_vgpr_argument(&args, ctx->i32, &ctx->vs_prim_id); // vs prim id
-            add_vgpr_argument(&args, ctx->i32, &ctx->abi.instance_id); // instance id
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->rel_auto_id); // rel auto id
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->vs_prim_id); // vs prim id
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.instance_id); // instance id
         }
         break;
     case MESA_SHADER_TESS_CTRL:
         if (has_previous_stage) {
             // First 6 system regs
-            add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // param oc lds
-            add_sgpr_argument(&args, ctx->i32, &ctx->merged_wave_info); // merged wave info
-            add_sgpr_argument(&args, ctx->i32, &ctx->tess_factor_offset); // tess factor offset
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // param oc lds
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->merged_wave_info); // merged wave info
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->tess_factor_offset); // tess factor offset
 
-            add_sgpr_argument(&args, ctx->i32, NULL); // scratch offset
-            add_sgpr_argument(&args, ctx->i32, NULL); // unknown
-            add_sgpr_argument(&args, ctx->i32, NULL); // unknown
+            add_sgpr_argument(&args, ctx->ac.i32, NULL); // scratch offset
+            add_sgpr_argument(&args, ctx->ac.i32, NULL); // unknown
+            add_sgpr_argument(&args, ctx->ac.i32, NULL); // unknown
 
             radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
             radv_define_vs_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &args);
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->ls_out_layout); // ls out layout
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->ls_out_layout); // ls out layout
 
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_offchip_layout); // tcs offchip layout
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_out_offsets); // tcs out offsets
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_out_layout); // tcs out layout
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_in_layout); // tcs in layout
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_out_offsets); // tcs out offsets
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_out_layout); // tcs out layout
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_in_layout); // tcs in layout
             if (ctx->shader_info->info.needs_multiview_view_index)
-                add_user_sgpr_argument(&args, ctx->i32, &ctx->view_index);
-
-            add_vgpr_argument(&args, ctx->i32, &ctx->tcs_patch_id); // patch id
-            add_vgpr_argument(&args, ctx->i32, &ctx->tcs_rel_ids); // rel ids;
-            add_vgpr_argument(&args, ctx->i32, &ctx->abi.vertex_id); // vertex id
-            add_vgpr_argument(&args, ctx->i32, &ctx->rel_auto_id); // rel auto id
-            add_vgpr_argument(&args, ctx->i32, &ctx->vs_prim_id); // vs prim id
-            add_vgpr_argument(&args, ctx->i32, &ctx->abi.instance_id); // instance id
+                add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
+
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->tcs_patch_id); // patch id
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->tcs_rel_ids); // rel ids;
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.vertex_id); // vertex id
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->rel_auto_id); // rel auto id
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->vs_prim_id); // vs prim id
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.instance_id); // instance id
         } else {
             radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_offchip_layout); // tcs offchip layout
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_out_offsets); // tcs out offsets
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_out_layout); // tcs out layout
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_in_layout); // tcs in layout
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_out_offsets); // tcs out offsets
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_out_layout); // tcs out layout
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_in_layout); // tcs in layout
             if (ctx->shader_info->info.needs_multiview_view_index)
-                add_user_sgpr_argument(&args, ctx->i32, &ctx->view_index);
-            add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // param oc lds
-            add_sgpr_argument(&args, ctx->i32, &ctx->tess_factor_offset); // tess factor offset
-            add_vgpr_argument(&args, ctx->i32, &ctx->tcs_patch_id); // patch id
-            add_vgpr_argument(&args, ctx->i32, &ctx->tcs_rel_ids); // rel ids;
+                add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // param oc lds
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->tess_factor_offset); // tess factor offset
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->tcs_patch_id); // patch id
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->tcs_rel_ids); // rel ids;
         }
         break;
     case MESA_SHADER_TESS_EVAL:
         radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
-        add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+        add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_offchip_layout); // tcs offchip layout
         if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->view_index);
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
         if (ctx->options->key.tes.as_es) {
-            add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // OC LDS
-            add_sgpr_argument(&args, ctx->i32, NULL); //
-            add_sgpr_argument(&args, ctx->i32, &ctx->es2gs_offset); // es2gs offset
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // OC LDS
+            add_sgpr_argument(&args, ctx->ac.i32, NULL); //
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->es2gs_offset); // es2gs offset
         } else {
-            add_sgpr_argument(&args, ctx->i32, NULL); //
-            add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // OC LDS
+            add_sgpr_argument(&args, ctx->ac.i32, NULL); //
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // OC LDS
         }
         add_vgpr_argument(&args, ctx->f32, &ctx->tes_u); // tes_u
         add_vgpr_argument(&args, ctx->f32, &ctx->tes_v); // tes_v
-        add_vgpr_argument(&args, ctx->i32, &ctx->tes_rel_patch_id); // tes rel patch id
-        add_vgpr_argument(&args, ctx->i32, &ctx->tes_patch_id); // tes patch id
+        add_vgpr_argument(&args, ctx->ac.i32, &ctx->tes_rel_patch_id); // tes rel patch id
+        add_vgpr_argument(&args, ctx->ac.i32, &ctx->tes_patch_id); // tes patch id
         break;
     case MESA_SHADER_GEOMETRY:
         if (has_previous_stage) {
             // First 6 system regs
-            add_sgpr_argument(&args, ctx->i32, &ctx->gs2vs_offset); // tess factor offset
-            add_sgpr_argument(&args, ctx->i32, &ctx->merged_wave_info); // merged wave info
-            add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // param oc lds
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->gs2vs_offset); // tess factor offset
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->merged_wave_info); // merged wave info
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // param oc lds
 
-            add_sgpr_argument(&args, ctx->i32, NULL); // scratch offset
-            add_sgpr_argument(&args, ctx->i32, NULL); // unknown
-            add_sgpr_argument(&args, ctx->i32, NULL); // unknown
+            add_sgpr_argument(&args, ctx->ac.i32, NULL); // scratch offset
+            add_sgpr_argument(&args, ctx->ac.i32, NULL); // unknown
+            add_sgpr_argument(&args, ctx->ac.i32, NULL); // unknown
 
             radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
             if (previous_stage == MESA_SHADER_TESS_EVAL)
-                add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+                add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_offchip_layout); // tcs offchip layout
             else
                 radv_define_vs_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &args);
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->gsvs_ring_stride); // gsvs stride
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->gsvs_num_entries); // gsvs num entires
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->gsvs_ring_stride); // gsvs stride
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->gsvs_num_entries); // gsvs num entires
             if (ctx->shader_info->info.needs_multiview_view_index)
-                add_user_sgpr_argument(&args, ctx->i32, &ctx->view_index);
+                add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
 
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[0]); // vtx01
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[2]); // vtx23
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_prim_id); // prim id
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_invocation_id);
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[4]);
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[0]); // vtx01
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[2]); // vtx23
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_prim_id); // prim id
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_invocation_id);
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[4]);
 
             if (previous_stage == MESA_SHADER_VERTEX) {
-                add_vgpr_argument(&args, ctx->i32, &ctx->abi.vertex_id); // vertex id
-                add_vgpr_argument(&args, ctx->i32, &ctx->rel_auto_id); // rel auto id
-                add_vgpr_argument(&args, ctx->i32, &ctx->vs_prim_id); // vs prim id
-                add_vgpr_argument(&args, ctx->i32, &ctx->abi.instance_id); // instance id
+                add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.vertex_id); // vertex id
+                add_vgpr_argument(&args, ctx->ac.i32, &ctx->rel_auto_id); // rel auto id
+                add_vgpr_argument(&args, ctx->ac.i32, &ctx->vs_prim_id); // vs prim id
+                add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.instance_id); // instance id
             } else {
                 add_vgpr_argument(&args, ctx->f32, &ctx->tes_u); // tes_u
                 add_vgpr_argument(&args, ctx->f32, &ctx->tes_v); // tes_v
-                add_vgpr_argument(&args, ctx->i32, &ctx->tes_rel_patch_id); // tes rel patch id
-                add_vgpr_argument(&args, ctx->i32, &ctx->tes_patch_id); // tes patch id
+                add_vgpr_argument(&args, ctx->ac.i32, &ctx->tes_rel_patch_id); // tes rel patch id
+                add_vgpr_argument(&args, ctx->ac.i32, &ctx->tes_patch_id); // tes patch id
             }
         } else {
             radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
             radv_define_vs_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &args);
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->gsvs_ring_stride); // gsvs stride
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->gsvs_num_entries); // gsvs num entires
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->gsvs_ring_stride); // gsvs stride
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->gsvs_num_entries); // gsvs num entires
             if (ctx->shader_info->info.needs_multiview_view_index)
-                add_user_sgpr_argument(&args, ctx->i32, &ctx->view_index);
-            add_sgpr_argument(&args, ctx->i32, &ctx->gs2vs_offset); // gs2vs offset
-            add_sgpr_argument(&args, ctx->i32, &ctx->gs_wave_id); // wave id
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[0]); // vtx0
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[1]); // vtx1
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_prim_id); // prim id
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[2]);
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[3]);
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[4]);
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[5]);
-            add_vgpr_argument(&args, ctx->i32, &ctx->gs_invocation_id);
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->gs2vs_offset); // gs2vs offset
+            add_sgpr_argument(&args, ctx->ac.i32, &ctx->gs_wave_id); // wave id
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[0]); // vtx0
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[1]); // vtx1
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_prim_id); // prim id
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[2]);
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[3]);
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[4]);
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[5]);
+            add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_invocation_id);
         }
         break;
     case MESA_SHADER_FRAGMENT:
         radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
         if (ctx->shader_info->info.ps.needs_sample_positions)
-            add_user_sgpr_argument(&args, ctx->i32, &ctx->sample_pos_offset); /* sample position offset */
-        add_sgpr_argument(&args, ctx->i32, &ctx->prim_mask); /* prim mask */
+            add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->sample_pos_offset); /* sample position offset */
+        add_sgpr_argument(&args, ctx->ac.i32, &ctx->prim_mask); /* prim mask */
         add_vgpr_argument(&args, ctx->v2i32, &ctx->persp_sample); /* persp sample */
         add_vgpr_argument(&args, ctx->v2i32, &ctx->persp_center); /* persp center */
         add_vgpr_argument(&args, ctx->v2i32, &ctx->persp_centroid); /* persp centroid */
         add_vgpr_argument(&args, ctx->v3i32, NULL); /* persp pull model */
         add_vgpr_argument(&args, ctx->v2i32, &ctx->linear_sample); /* linear sample */
         add_vgpr_argument(&args, ctx->v2i32, &ctx->linear_center); /* linear center */
         add_vgpr_argument(&args, ctx->v2i32, &ctx->linear_centroid); /* linear centroid */
         add_vgpr_argument(&args, ctx->f32, NULL); /* line stipple tex */
         add_vgpr_argument(&args, ctx->f32, &ctx->abi.frag_pos[0]); /* pos x float */
         add_vgpr_argument(&args, ctx->f32, &ctx->abi.frag_pos[1]); /* pos y float */
         add_vgpr_argument(&args, ctx->f32, &ctx->abi.frag_pos[2]); /* pos z float */
         add_vgpr_argument(&args, ctx->f32, &ctx->abi.frag_pos[3]); /* pos w float */
-        add_vgpr_argument(&args, ctx->i32, &ctx->abi.front_face); /* front face */
-        add_vgpr_argument(&args, ctx->i32, &ctx->abi.ancillary); /* ancillary */
-        add_vgpr_argument(&args, ctx->i32, &ctx->abi.sample_coverage); /* sample coverage */
-        add_vgpr_argument(&args, ctx->i32, NULL); /* fixed pt */
+        add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.front_face); /* front face */
+        add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.ancillary); /* ancillary */
+        add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.sample_coverage); /* sample coverage */
+        add_vgpr_argument(&args, ctx->ac.i32, NULL); /* fixed pt */
         break;
     default:
         unreachable("Shader stage not implemented");
     }
 
     ctx->main_function = create_llvm_function(
         ctx->context, ctx->module, ctx->builder, NULL, 0, &args, ctx->max_workgroup_size, ctx->options->unsafe_math);
     set_llvm_calling_convention(ctx->main_function, stage);
@@ -995,26 +994,25 @@ static void create_function(struct nir_to_llvm_context *ctx,
     ctx->shader_info->num_user_sgprs = user_sgpr_idx;
 }
 
 static void setup_types(struct nir_to_llvm_context *ctx)
 {
     ctx->voidt = LLVMVoidTypeInContext(ctx->context);
     ctx->i1 = LLVMIntTypeInContext(ctx->context, 1);
     ctx->i8 = LLVMIntTypeInContext(ctx->context, 8);
     ctx->i16 = LLVMIntTypeInContext(ctx->context, 16);
-    ctx->i32 = LLVMIntTypeInContext(ctx->context, 32);
     ctx->i64 = LLVMIntTypeInContext(ctx->context, 64);
-    ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
-    ctx->v3i32 = LLVMVectorType(ctx->i32, 3);
-    ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
-    ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
+    ctx->v2i32 = LLVMVectorType(ctx->ac.i32, 2);
+    ctx->v3i32 = LLVMVectorType(ctx->ac.i32, 3);
+    ctx->v4i32 = LLVMVectorType(ctx->ac.i32, 4);
+    ctx->v8i32 = LLVMVectorType(ctx->ac.i32, 8);
     ctx->f32 = LLVMFloatTypeInContext(ctx->context);
     ctx->f16 = LLVMHalfTypeInContext(ctx->context);
     ctx->f64 = LLVMDoubleTypeInContext(ctx->context);
     ctx->v2f32 = LLVMVectorType(ctx->f32, 2);
     ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
 
     ctx->uniform_md_kind = LLVMGetMDKindIDInContext(ctx->context, "amdgpu.uniform", 14);
     ctx->empty_md = LLVMMDNodeInContext(ctx->context, NULL, 0);
 }
@@ -1337,39 +1335,39 @@ static LLVMValueRef emit_f2f16(struct nir_to_llvm_context *ctx,
     LLVMValueRef result;
     LLVMValueRef cond = NULL;
 
     src0 = ac_to_float(&ctx->ac, src0);
     result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");
 
     if (ctx->options->chip_class >= VI) {
         LLVMValueRef args[2];
         /* Check if the result is a denormal - and flush to 0 if so. */
         args[0] = result;
-        args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
+        args[1] = LLVMConstInt(ctx->ac.i32, N_SUBNORMAL | P_SUBNORMAL, false);
         cond = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.class.f16", ctx->i1, args, 2, AC_FUNC_ATTR_READNONE);
     }
 
     /* need to convert back up to f32 */
     result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");
 
     if (ctx->options->chip_class >= VI)
         result = LLVMBuildSelect(ctx->builder, cond, ctx->ac.f32_0, result, "");
     else {
         /* for SI/CIK */
         /* 0x38800000 is smallest half float value (2^-14) in 32-bit float,
          * so compare the result and flush to 0 if it's smaller.
          */
         LLVMValueRef temp, cond2;
         temp = emit_intrin_1f_param(&ctx->ac, "llvm.fabs", ctx->f32, result);
         cond = LLVMBuildFCmp(ctx->builder, LLVMRealUGT,
-                     LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
+                     LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->ac.i32, 0x38800000, false), ctx->f32, ""),
                      temp, "");
         cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealUNE, temp, ctx->ac.f32_0, "");
         cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
         result = LLVMBuildSelect(ctx->builder, cond, ctx->ac.f32_0, result, "");
     }
     return result;
 }
 
 static LLVMValueRef emit_umul_high(struct ac_llvm_context *ctx,
@@ -2236,41 +2234,41 @@ static LLVMValueRef visit_vulkan_resource_index(struct nir_to_llvm_context *ctx,
     struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
     unsigned base_offset = layout->binding[binding].offset;
     LLVMValueRef offset, stride;
 
     if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
         layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
         unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start + layout->binding[binding].dynamic_offset_offset;
         desc_ptr = ctx->push_constants;
         base_offset = pipeline_layout->push_constant_size + 16 * idx;
-        stride = LLVMConstInt(ctx->i32, 16, false);
+        stride = LLVMConstInt(ctx->ac.i32, 16, false);
     } else
-        stride = LLVMConstInt(ctx->i32, layout->binding[binding].size, false);
+        stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);
 
-    offset = LLVMConstInt(ctx->i32, base_offset, false);
+    offset = LLVMConstInt(ctx->ac.i32, base_offset, false);
     index = LLVMBuildMul(ctx->builder, index, stride, "");
     offset = LLVMBuildAdd(ctx->builder, offset, index, "");
 
     desc_ptr = ac_build_gep0(&ctx->ac, desc_ptr, offset);
     desc_ptr = cast_ptr(ctx, desc_ptr, ctx->v4i32);
     LLVMSetMetadata(desc_ptr, ctx->uniform_md_kind, ctx->empty_md);
 
     return LLVMBuildLoad(ctx->builder, desc_ptr, "");
 }
 
 static LLVMValueRef visit_load_push_constant(struct nir_to_llvm_context *ctx,
                          nir_intrinsic_instr *instr)
 {
     LLVMValueRef ptr, addr;
 
-    addr = LLVMConstInt(ctx->i32, nir_intrinsic_base(instr), 0);
+    addr = LLVMConstInt(ctx->ac.i32, nir_intrinsic_base(instr), 0);
     addr = LLVMBuildAdd(ctx->builder, addr, get_src(ctx->nir, instr->src[0]), "");
 
     ptr = ac_build_gep0(&ctx->ac, ctx->push_constants, addr);
     ptr = cast_ptr(ctx, ptr, get_def_type(ctx->nir, &instr->dest.ssa));
 
     return LLVMBuildLoad(ctx->builder, ptr, "");
 }
 
 static LLVMValueRef visit_get_buffer_size(struct ac_nir_context *ctx,
                       const nir_intrinsic_instr *instr)
@@ -2620,21 +2618,21 @@ static LLVMValueRef get_tcs_tes_buffer_address(struct nir_to_llvm_context *ctx,
 {
     LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
     LLVMValueRef param_stride, constant16;
     LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
 
     vertices_per_patch = unpack_param(&ctx->ac, ctx->tcs_offchip_layout, 9, 6);
     num_patches = unpack_param(&ctx->ac, ctx->tcs_offchip_layout, 0, 9);
     total_vertices = LLVMBuildMul(ctx->builder, vertices_per_patch, num_patches, "");
 
-    constant16 = LLVMConstInt(ctx->i32, 16, false);
+    constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
     if (vertex_index) {
         base_addr = LLVMBuildMul(ctx->builder, rel_patch_id, vertices_per_patch, "");
         base_addr = LLVMBuildAdd(ctx->builder, base_addr, vertex_index, "");
         param_stride = total_vertices;
     } else {
         base_addr = rel_patch_id;
@@ -2660,26 +2658,26 @@ static LLVMValueRef get_tcs_tes_buffer_address(struct nir_to_llvm_context *ctx,
 
 static LLVMValueRef
 get_tcs_tes_buffer_address_params(struct nir_to_llvm_context *ctx, unsigned param, unsigned const_index, bool is_compact, LLVMValueRef vertex_index, LLVMValueRef indir_index)
 {
     LLVMValueRef param_index;
 
     if (indir_index)
-        param_index = LLVMBuildAdd(ctx->builder, LLVMConstInt(ctx->i32, param, false),
+        param_index = LLVMBuildAdd(ctx->builder, LLVMConstInt(ctx->ac.i32, param, false),
                        indir_index, "");
     else {
         if (const_index && !is_compact)
             param += const_index;
-        param_index = LLVMConstInt(ctx->i32, param, false);
+        param_index = LLVMConstInt(ctx->ac.i32, param, false);
     }
     return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
 }
 
 static void
 mark_tess_output(struct nir_to_llvm_context *ctx, bool is_patch, uint32_t param)
 {
     if (is_patch) {
@@ -2703,31 +2701,31 @@ get_dw_address(struct nir_to_llvm_context *ctx,
     if (vertex_index) {
         dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
                        LLVMBuildMul(ctx->builder, vertex_index, stride, ""), "");
     }
 
     if (indir_index)
         dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
                        LLVMBuildMul(ctx->builder, indir_index,
-                            LLVMConstInt(ctx->i32, 4, false), ""), "");
+                            LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
     else if (const_index && !compact_const_index)
         dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
-                       LLVMConstInt(ctx->i32, const_index, false), "");
+                       LLVMConstInt(ctx->ac.i32, const_index, false), "");
 
     dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
-                   LLVMConstInt(ctx->i32, param * 4, false), "");
+                   LLVMConstInt(ctx->ac.i32, param * 4, false), "");
 
     if (const_index && compact_const_index)
         dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
-                       LLVMConstInt(ctx->i32, const_index, false), "");
+                       LLVMConstInt(ctx->ac.i32, const_index, false), "");
     return dw_addr;
 }
 
 static LLVMValueRef
 build_varying_gather_values(struct ac_llvm_context *ctx, LLVMValueRef *values,
                 unsigned value_count, unsigned component)
 {
     LLVMValueRef vec = NULL;
 
     if (value_count == 1) {
@@ -2909,21 +2907,21 @@ load_tes_input(struct nir_to_llvm_context *ctx,
     if (instr->variables[0]->var->data.location == VARYING_SLOT_CLIP_DIST0 &&
         is_compact && const_index > 3) {
         const_index -= 3;
         param++;
     }
 
     unsigned comp = instr->variables[0]->var->data.location_frac;
     buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact, vertex_index, indir_index);
 
-    LLVMValueRef comp_offset = LLVMConstInt(ctx->i32, comp * 4, false);
+    LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, comp * 4, false);
     buf_addr = LLVMBuildAdd(ctx->builder, buf_addr, comp_offset, "");
 
     result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, instr->num_components, NULL,
                       buf_addr, ctx->oc_lds, is_compact ?
                      (4 * const_index) : 0, 1, 0, true, false);
 
     result = trim_vector(&ctx->ac, result, instr->num_components);
     result = LLVMBuildBitCast(ctx->builder, result, get_def_type(ctx->nir, &instr->dest.ssa), "");
     return result;
 }
 
 static LLVMValueRef
@@ -2935,44 +2933,44 @@ load_gs_input(struct nir_to_llvm_context *ctx,
     LLVMValueRef args[9];
     unsigned param, vtx_offset_param;
     LLVMValueRef value[4], result;
     unsigned vertex_index;
     get_deref_offset(ctx->nir, instr->variables[0], false, &vertex_index, NULL, &const_index, &indir_index);
     vtx_offset_param = vertex_index;
     assert(vtx_offset_param < 6);
     vtx_offset = LLVMBuildMul(ctx->builder, ctx->gs_vtx_offset[vtx_offset_param],
-                  LLVMConstInt(ctx->i32, 4, false), "");
+                  LLVMConstInt(ctx->ac.i32, 4, false), "");
 
     param = shader_io_get_unique_index(instr->variables[0]->var->data.location);
 
     unsigned comp = instr->variables[0]->var->data.location_frac;
     for (unsigned i = comp; i < instr->num_components + comp; i++) {
         if (ctx->ac.chip_class >= GFX9) {
             LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
             dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
                        LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
             value[i] = ac_lds_load(&ctx->ac, dw_addr);
         } else {
             args[0] = ctx->esgs_ring;
             args[1] = vtx_offset;
-            args[2] = LLVMConstInt(ctx->i32, (param * 4 + i + const_index) * 256, false);
+            args[2] = LLVMConstInt(ctx->ac.i32, (param * 4 + i + const_index) * 256, false);
             args[3] = ctx->ac.i32_0;
             args[4] = ctx->ac.i32_1; /* OFFEN */
             args[5] = ctx->ac.i32_0; /* IDXEN */
             args[6] = ctx->ac.i32_1; /* GLC */
             args[7] = ctx->ac.i32_0; /* SLC */
             args[8] = ctx->ac.i32_0; /* TFE */
 
             value[i] = ac_build_intrinsic(&ctx->ac, "llvm.SI.buffer.load.dword.i32.i32",
-                              ctx->i32, args, 9,
+                              ctx->ac.i32, args, 9,
                               AC_FUNC_ATTR_READONLY | AC_FUNC_ATTR_LEGACY);
         }
     }
     result = build_varying_gather_values(&ctx->ac, value, instr->num_components, comp);
 
     return result;
 }
 
 static LLVMValueRef
@@ -3685,21 +3683,21 @@ static LLVMValueRef visit_image_size(struct ac_nir_context *ctx,
 }
 
 #define NOOP_WAITCNT 0xf7f
 #define LGKM_CNT 0x07f
 #define VM_CNT 0xf70
 
 static void emit_waitcnt(struct nir_to_llvm_context *ctx,
              unsigned simm16)
 {
     LLVMValueRef args[1] = {
-        LLVMConstInt(ctx->i32, simm16, false),
+        LLVMConstInt(ctx->ac.i32, simm16, false),
     };
     ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.s.waitcnt", ctx->voidt, args, 1, 0);
 }
 
 static void emit_barrier(struct nir_to_llvm_context *ctx)
 {
     /* SI only (thanks to a hw bug workaround):
      * The real barrier instruction isn't needed, because an entire patch
      * always fits into a single wave.
@@ -3723,21 +3721,21 @@ static void emit_discard_if(struct ac_nir_context *ctx,
                 ctx->ac.i32_0, "");
     ac_build_kill_if_false(&ctx->ac, cond);
 }
 
 static LLVMValueRef
 visit_load_local_invocation_index(struct nir_to_llvm_context *ctx)
 {
     LLVMValueRef result;
     LLVMValueRef thread_id = ac_get_thread_id(&ctx->ac);
     result = LLVMBuildAnd(ctx->builder, ctx->tg_size,
-                  LLVMConstInt(ctx->i32, 0xfc0, false), "");
+                  LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
 
     return LLVMBuildAdd(ctx->builder, result, thread_id, "");
 }
 
 static LLVMValueRef visit_var_atomic(struct nir_to_llvm_context *ctx,
                      const nir_intrinsic_instr *instr)
 {
     LLVMValueRef ptr, result;
     LLVMValueRef src = get_src(ctx->nir, instr->src[0]);
     ptr = build_gep_for_deref(ctx->nir, instr->variables[0]);
@@ -3819,21 +3817,21 @@ static LLVMValueRef lookup_interp_param(struct nir_to_llvm_context *ctx,
         return ctx->linear_sample;
         break;
     }
     return NULL;
 }
 
 static LLVMValueRef load_sample_position(struct nir_to_llvm_context *ctx,
                      LLVMValueRef sample_id)
 {
     LLVMValueRef result;
-    LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_PS_SAMPLE_POSITIONS, false));
+    LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false));
 
     ptr = LLVMBuildBitCast(ctx->builder, ptr, const_array(ctx->v2f32, 64), "");
 
     sample_id = LLVMBuildAdd(ctx->builder, sample_id, ctx->sample_pos_offset, "");
     result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);
 
     return result;
 }
@@ -3879,78 +3877,78 @@ static LLVMValueRef visit_interp(struct nir_to_llvm_context *ctx,
         /* fetch sample ID */
         sample_position = load_sample_position(ctx, src0);
 
         src_c0 = LLVMBuildExtractElement(ctx->builder, sample_position, ctx->ac.i32_0, "");
         src_c0 = LLVMBuildFSub(ctx->builder, src_c0, halfval, "");
         src_c1 = LLVMBuildExtractElement(ctx->builder, sample_position, ctx->ac.i32_1, "");
         src_c1 = LLVMBuildFSub(ctx->builder, src_c1, halfval, "");
     }
     interp_param = lookup_interp_param(ctx, instr->variables[0]->var->data.interpolation, location);
-    attr_number = LLVMConstInt(ctx->i32, input_index, false);
+    attr_number = LLVMConstInt(ctx->ac.i32, input_index, false);
 
     if (location == INTERP_CENTER) {
         LLVMValueRef ij_out[2];
         LLVMValueRef ddxy_out = emit_ddxy_interp(ctx->nir, interp_param);
 
         /*
          * take the I then J parameters, and the DDX/Y for it, and
         * calculate the IJ inputs for the interpolator.
         * temp1 = ddx * offset/sample.x + I;
         * interp_param.I = ddy * offset/sample.y + temp1;
         * temp1 = ddx * offset/sample.x + J;
         * interp_param.J = ddy * offset/sample.y + temp1;
         */
        for (unsigned i = 0; i < 2; i++) {
-            LLVMValueRef ix_ll = LLVMConstInt(ctx->i32, i, false);
-            LLVMValueRef iy_ll = LLVMConstInt(ctx->i32, i + 2, false);
+            LLVMValueRef ix_ll = LLVMConstInt(ctx->ac.i32, i, false);
+            LLVMValueRef iy_ll = LLVMConstInt(ctx->ac.i32, i + 2, false);
             LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->builder, ddxy_out, ix_ll, "");
             LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->builder, ddxy_out, iy_ll, "");
             LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->builder, interp_param, ix_ll, "");
             LLVMValueRef temp1, temp2;
 
             interp_el = LLVMBuildBitCast(ctx->builder, interp_el, ctx->f32, "");
 
             temp1 = LLVMBuildFMul(ctx->builder, ddx_el, src_c0, "");
             temp1 = LLVMBuildFAdd(ctx->builder, temp1, interp_el, "");
 
             temp2 = LLVMBuildFMul(ctx->builder, ddy_el, src_c1, "");
             temp2 = LLVMBuildFAdd(ctx->builder, temp2, temp1, "");
 
             ij_out[i] = LLVMBuildBitCast(ctx->builder,
-                             temp2, ctx->i32, "");
+                             temp2, ctx->ac.i32, "");
         }
         interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
     }
 
     for (chan = 0; chan < 4; chan++) {
-        LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, false);
+        LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
 
         if (interp_param) {
             interp_param = LLVMBuildBitCast(ctx->builder,
                             interp_param, LLVMVectorType(ctx->f32, 2), "");
             LLVMValueRef i = LLVMBuildExtractElement(
                 ctx->builder, interp_param, ctx->ac.i32_0, "");
             LLVMValueRef j = LLVMBuildExtractElement(
                 ctx->builder, interp_param, ctx->ac.i32_1, "");
 
             result[chan] = ac_build_fs_interp(&ctx->ac,
                               llvm_chan, attr_number,
                               ctx->prim_mask, i, j);
         } else {
             result[chan] = ac_build_fs_interp_mov(&ctx->ac,
-                                  LLVMConstInt(ctx->i32, 2, false),
+                                  LLVMConstInt(ctx->ac.i32, 2, false),
                                   llvm_chan, attr_number,
                                   ctx->prim_mask);
         }
     }
 
     return build_varying_gather_values(&ctx->ac, result, instr->num_components,
                        instr->variables[0]->var->data.location_frac);
 }
 
 static void
 visit_emit_vertex(struct nir_to_llvm_context *ctx,
@@ -3965,21 +3963,21 @@ visit_emit_vertex(struct nir_to_llvm_context *ctx,
     gs_next_vertex = LLVMBuildLoad(ctx->builder,
                        ctx->gs_next_vertex, "");
 
     /* If this thread has already emitted the declared maximum number of
      * vertices, kill it: excessive vertex emissions are not supposed to
      * have any effect, and GS threads have no externally observable
      * effects other than emitting vertices.
      */
     can_emit = LLVMBuildICmp(ctx->builder, LLVMIntULT, gs_next_vertex,
-                 LLVMConstInt(ctx->i32, ctx->gs_max_out_vertices, false), "");
+                 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
     ac_build_kill_if_false(&ctx->ac, can_emit);
 
     /* loop num outputs */
     idx = 0;
     for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
         LLVMValueRef *out_ptr = &ctx->nir->outputs[i * 4];
         int length = 4;
         int slot = idx;
         int slot_inc = 1;
@@ -3988,25 +3986,25 @@ visit_emit_vertex(struct nir_to_llvm_context *ctx,
         if (i == VARYING_SLOT_CLIP_DIST0) {
             /* pack clip and cull into a single set of slots */
             length = ctx->num_output_clips + ctx->num_output_culls;
             if (length > 4)
                 slot_inc = 2;
         }
         for (unsigned j = 0; j < length; j++) {
             LLVMValueRef out_val = LLVMBuildLoad(ctx->builder,
                                  out_ptr[j], "");
-            LLVMValueRef voffset = LLVMConstInt(ctx->i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
+            LLVMValueRef voffset = LLVMConstInt(ctx->ac.i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
             voffset = LLVMBuildAdd(ctx->builder, voffset, gs_next_vertex, "");
-            voffset = LLVMBuildMul(ctx->builder, voffset, LLVMConstInt(ctx->i32, 4, false), "");
+            voffset = LLVMBuildMul(ctx->builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");
 
-            out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->i32, "");
+            out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->ac.i32, "");
 
             ac_build_buffer_store_dword(&ctx->ac, ctx->gsvs_ring,
                             out_val, 1,
                             voffset, ctx->gs2vs_offset, 0,
                             1, 1, true, true);
         }
         idx += slot_inc;
     }
 
     gs_next_vertex = LLVMBuildAdd(ctx->builder, gs_next_vertex,
@@ -4295,36 +4293,36 @@ static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
     offset += constant_index * stride;
 
     if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
         (!index || binding->immutable_samplers_equal)) {
         if (binding->immutable_samplers_equal)
             constant_index = 0;
 
         const uint32_t *samplers = radv_immutable_samplers(layout, binding);
 
         LLVMValueRef constants[] = {
-            LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 0], 0),
-            LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 1], 0),
-            LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 2], 0),
-            LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 3], 0),
+            LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
+            LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
+            LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
+            LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
         };
         return ac_build_gather_values(&ctx->ac, constants, 4);
     }
 
     assert(stride % type_size == 0);
 
     if (!index)
         index = ctx->ac.i32_0;
 
-    index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, stride / type_size, 0), "");
+    index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");
 
-    list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->i32, offset, 0));
+    list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->ac.i32, offset, 0));
     list = LLVMBuildPointerCast(builder, list, const_array(type, 0), "");
 
     return ac_build_load_to_sgpr(&ctx->ac, list, index);
 }
 
 static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
                      const nir_deref_var *deref,
                      enum ac_descriptor_type desc_type,
                      const nir_tex_instr *tex_instr,
                      bool image, bool write)
@@ -5021,50 +5019,50 @@ handle_vs_input_decl(struct nir_to_llvm_context *ctx,
     if (ctx->options->key.vs.instance_rate_inputs & (1u << index)) {
         buffer_index = LLVMBuildAdd(ctx->builder, ctx->abi.instance_id,
                         ctx->abi.start_instance, "");
         ctx->shader_info->vs.vgpr_comp_cnt = MAX2(3,
                               ctx->shader_info->vs.vgpr_comp_cnt);
     } else
         buffer_index = LLVMBuildAdd(ctx->builder, ctx->abi.vertex_id,
                         ctx->abi.base_vertex, "");
 
     for (unsigned i = 0; i < attrib_count; ++i, ++idx) {
-        t_offset = LLVMConstInt(ctx->i32, index + i, false);
+        t_offset = LLVMConstInt(ctx->ac.i32, index + i, false);
 
         t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);
 
         input = ac_build_buffer_load_format(&ctx->ac, t_list,
                             buffer_index,
-                            LLVMConstInt(ctx->i32, 0, false),
+                            LLVMConstInt(ctx->ac.i32, 0, false),
                             true);
 
         for (unsigned chan = 0; chan < 4; chan++) {
-            LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, false);
+            LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
             ctx->inputs[radeon_llvm_reg_index_soa(idx, chan)] =
                 ac_to_integer(&ctx->ac, LLVMBuildExtractElement(ctx->builder,
                                 input, llvm_chan, ""));
         }
     }
 }
 
 static void interp_fs_input(struct nir_to_llvm_context *ctx,
                 unsigned attr,
                 LLVMValueRef interp_param,
                 LLVMValueRef prim_mask,
                 LLVMValueRef result[4])
 {
     LLVMValueRef attr_number;
     unsigned chan;
     LLVMValueRef i, j;
     bool interp = interp_param != NULL;
 
-    attr_number = LLVMConstInt(ctx->i32, attr, false);
+    attr_number = LLVMConstInt(ctx->ac.i32, attr, false);
 
     /* fs.constant returns the param from the middle vertex, so it's not
      * really useful for flat shading. It's meant to be used for custom
      * interpolation (but the intrinsic can't fetch from the other two
      * vertices).
      *
      * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
      * to do the right thing. The only reason we use fs.constant is that
     * fs.interp cannot be used on integers, because they can be equal
     * to NaN.
@@ -5073,30 +5071,30 @@ static void interp_fs_input(struct nir_to_llvm_context *ctx,
         interp_param = LLVMBuildBitCast(ctx->builder, interp_param,
                         LLVMVectorType(ctx->f32, 2), "");
         i = LLVMBuildExtractElement(ctx->builder, interp_param,
                         ctx->ac.i32_0, "");
         j = LLVMBuildExtractElement(ctx->builder, interp_param,
                         ctx->ac.i32_1, "");
     }
 
     for (chan = 0; chan < 4; chan++) {
-        LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, false);
+        LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
 
         if (interp) {
             result[chan] = ac_build_fs_interp(&ctx->ac,
                               llvm_chan,
                               attr_number,
                               prim_mask, i, j);
         } else {
             result[chan] = ac_build_fs_interp_mov(&ctx->ac,
-                                  LLVMConstInt(ctx->i32, 2, false),
+                                  LLVMConstInt(ctx->ac.i32, 2, false),
                                   llvm_chan,
                                   attr_number,
                                   prim_mask);
         }
     }
 }
 
 static void
 handle_fs_input_decl(struct nir_to_llvm_context *ctx,
              struct nir_variable *variable)
@@ -5329,21 +5327,21 @@ handle_shader_output_decl(struct ac_nir_context *ctx,
 static LLVMTypeRef
 glsl_base_to_llvm_type(struct nir_to_llvm_context *ctx,
                enum glsl_base_type type)
 {
     switch (type) {
     case GLSL_TYPE_INT:
     case GLSL_TYPE_UINT:
     case GLSL_TYPE_BOOL:
     case GLSL_TYPE_SUBROUTINE:
-        return ctx->i32;
+        return ctx->ac.i32;
     case GLSL_TYPE_FLOAT: /* TODO handle mediump */
         return ctx->f32;
     case GLSL_TYPE_INT64:
     case GLSL_TYPE_UINT64:
         return ctx->i64;
     case GLSL_TYPE_DOUBLE:
         return ctx->f64;
     default:
         unreachable("unknown GLSL type");
     }
@@ -5431,25 +5429,25 @@ emit_float_saturate(struct ac_llvm_context *ctx, LLVMValueRef v, float lo, float hi)
 {
     v = ac_to_float(ctx, v);
     v = emit_intrin_2f_param(ctx, "llvm.maxnum", ctx->f32, v, LLVMConstReal(ctx->f32, lo));
     return emit_intrin_2f_param(ctx, "llvm.minnum", ctx->f32, v, LLVMConstReal(ctx->f32, hi));
 }
 
 static LLVMValueRef
 emit_pack_int16(struct nir_to_llvm_context *ctx,
         LLVMValueRef src0,
         LLVMValueRef src1)
 {
-    LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
+    LLVMValueRef const16 = LLVMConstInt(ctx->ac.i32, 16, false);
     LLVMValueRef comp[2];
 
-    comp[0] = LLVMBuildAnd(ctx->builder, src0, LLVMConstInt(ctx->i32, 65535, 0), "");
-    comp[1] = LLVMBuildAnd(ctx->builder, src1, LLVMConstInt(ctx->i32, 65535, 0), "");
+    comp[0] = LLVMBuildAnd(ctx->builder, src0, LLVMConstInt(ctx->ac.i32, 65535, 0), "");
+    comp[1] = LLVMBuildAnd(ctx->builder, src1, LLVMConstInt(ctx->ac.i32, 65535, 0), "");
     comp[1] = LLVMBuildShl(ctx->builder, comp[1], const16, "");
 
     return LLVMBuildOr(ctx->builder, comp[0], comp[1], "");
 }
 
 /* Initialize arguments for the shader export intrinsic */
 static void
 si_llvm_init_export_args(struct nir_to_llvm_context *ctx,
              LLVMValueRef *values,
              unsigned target,
              struct ac_export_args *args)
@@ -5521,72 +5519,72 @@ si_llvm_init_export_args(struct nir_to_llvm_context *ctx,
         break;
 
     case V_028714_SPI_SHADER_UNORM16_ABGR:
         for (unsigned chan = 0; chan < 4; chan++) {
             val[chan] = ac_build_clamp(&ctx->ac, values[chan]);
             val[chan] = LLVMBuildFMul(ctx->builder, val[chan],
                           LLVMConstReal(ctx->f32, 65535), "");
             val[chan] = LLVMBuildFAdd(ctx->builder, val[chan],
                           LLVMConstReal(ctx->f32, 0.5), "");
             val[chan] = LLVMBuildFPToUI(ctx->builder, val[chan],
-                            ctx->i32, "");
+                            ctx->ac.i32, "");
         }
 
         args->compr = 1;
         args->out[0] = emit_pack_int16(ctx, val[0], val[1]);
         args->out[1] = emit_pack_int16(ctx, val[2], val[3]);
         break;
 
     case V_028714_SPI_SHADER_SNORM16_ABGR:
         for (unsigned chan = 0; chan < 4; chan++) {
             val[chan] = emit_float_saturate(&ctx->ac, values[chan], -1, 1);
             val[chan] = LLVMBuildFMul(ctx->builder, val[chan],
                           LLVMConstReal(ctx->f32, 32767), "");
 
             /* If positive, add 0.5, else add -0.5. */
             val[chan] = LLVMBuildFAdd(ctx->builder, val[chan],
                           LLVMBuildSelect(ctx->builder,
                                   LLVMBuildFCmp(ctx->builder, LLVMRealOGE, val[chan], ctx->ac.f32_0, ""),
                                   LLVMConstReal(ctx->f32, 0.5),
                                   LLVMConstReal(ctx->f32, -0.5), ""), "");
-            val[chan] = LLVMBuildFPToSI(ctx->builder, val[chan], ctx->i32, "");
+            val[chan] = LLVMBuildFPToSI(ctx->builder, val[chan], ctx->ac.i32, "");
         }
 
         args->compr = 1;
         args->out[0] = emit_pack_int16(ctx, val[0], val[1]);
         args->out[1] = emit_pack_int16(ctx, val[2], val[3]);
         break;
 
     case V_028714_SPI_SHADER_UINT16_ABGR: {
-        LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
+        LLVMValueRef max_rgb = LLVMConstInt(ctx->ac.i32,
                             is_int8 ? 255 : is_int10 ? 1023 : 65535, 0);
-        LLVMValueRef max_alpha = !is_int10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);
+        LLVMValueRef max_alpha = !is_int10 ? max_rgb : LLVMConstInt(ctx->ac.i32, 3, 0);
 
         for (unsigned chan = 0; chan < 4; chan++) {
             val[chan] = ac_to_integer(&ctx->ac, values[chan]);
             val[chan] = emit_minmax_int(&ctx->ac, LLVMIntULT, val[chan], chan == 3 ? max_alpha : max_rgb);
         }
 
         args->compr = 1;
         args->out[0] = emit_pack_int16(ctx, val[0], val[1]);
         args->out[1] = emit_pack_int16(ctx, val[2], val[3]);
         break;
     }
 
     case V_028714_SPI_SHADER_SINT16_ABGR: {
-        LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
+        LLVMValueRef max_rgb = LLVMConstInt(ctx->ac.i32,
                             is_int8 ? 127 : is_int10 ? 511 : 32767, 0);
-        LLVMValueRef min_rgb = LLVMConstInt(ctx->i32,
+        LLVMValueRef min_rgb = LLVMConstInt(ctx->ac.i32,
                             is_int8 ? -128 : is_int10 ? -512 : -32768, 0);
         LLVMValueRef max_alpha = !is_int10 ? max_rgb : ctx->ac.i32_1;
-        LLVMValueRef min_alpha = !is_int10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);
+        LLVMValueRef min_alpha = !is_int10 ? min_rgb : LLVMConstInt(ctx->ac.i32, -2, 0);
 
         /* Clamp. */
         for (unsigned chan = 0; chan < 4; chan++) {
             val[chan] = ac_to_integer(&ctx->ac, values[chan]);
             val[chan] = emit_minmax_int(&ctx->ac, LLVMIntSLT, val[chan], chan == 3 ?
                             max_alpha : max_rgb);
             val[chan] = emit_minmax_int(&ctx->ac, LLVMIntSGT, val[chan], chan == 3 ? min_alpha : min_rgb);
         }
 
         args->compr = 1;
         args->out[0] = emit_pack_int16(ctx, val[0], val[1]);
@@ -5707,21 +5705,21 @@ handle_vs_outputs_post(struct nir_to_llvm_context *ctx,
     if (outinfo->writes_layer == true)
         pos_args[1].out[2] = layer_value;
     if (outinfo->writes_viewport_index == true) {
         if (ctx->options->chip_class >= GFX9) {
             /* GFX9 has the layer in out.z[10:0] and the viewport
              * index in out.z[19:16].
              */
             LLVMValueRef v = viewport_index_value;
             v = ac_to_integer(&ctx->ac, v);
             v = LLVMBuildShl(ctx->builder, v,
-                     LLVMConstInt(ctx->i32, 16, false),
+                     LLVMConstInt(ctx->ac.i32, 16, false),
                      "");
             v = LLVMBuildOr(ctx->builder, v,
                     ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
 
             pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
             pos_args[1].enabled_channels |= 1 << 2;
         } else {
             pos_args[1].out[3] = viewport_index_value;
             pos_args[1].enabled_channels |= 1 << 3;
         }
@@ -5826,47 +5824,47 @@ handle_es_outputs_post(struct nir_to_llvm_context *ctx,
     outinfo->esgs_itemsize = (max_output_written + 1) * 16;
 
     if (ctx->ac.chip_class >= GFX9) {
         unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
         LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
         LLVMValueRef wave_idx = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
                          LLVMConstInt(ctx->ac.i32, 24, false),
                          LLVMConstInt(ctx->ac.i32, 4, false), false);
         vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
                      LLVMBuildMul(ctx->ac.builder, wave_idx,
-                              LLVMConstInt(ctx->i32, 64, false), ""), "");
+                              LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
         lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
-                    LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
+                    LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
     }
 
     for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
         LLVMValueRef dw_addr;
         LLVMValueRef *out_ptr = &ctx->nir->outputs[i * 4];
         int param_index;
         int length = 4;
 
         if (!(ctx->output_mask & (1ull << i)))
             continue;
 
         if (i == VARYING_SLOT_CLIP_DIST0)
             length = ctx->num_output_clips + ctx->num_output_culls;
 
         param_index = shader_io_get_unique_index(i);
 
         if (lds_base) {
             dw_addr = LLVMBuildAdd(ctx->builder, lds_base,
-                           LLVMConstInt(ctx->i32, param_index * 4, false),
+                           LLVMConstInt(ctx->ac.i32, param_index * 4, false),
                            "");
         }
         for (j = 0; j < length; j++) {
             LLVMValueRef out_val = LLVMBuildLoad(ctx->builder, out_ptr[j], "");
-            out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->i32, "");
+            out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->ac.i32, "");
 
             if (ctx->ac.chip_class >= GFX9) {
                 ac_lds_store(&ctx->ac, dw_addr,
                          LLVMBuildLoad(ctx->builder, out_ptr[j], ""));
                 dw_addr = LLVMBuildAdd(ctx->builder, dw_addr, ctx->ac.i32_1, "");
             } else {
                 ac_build_buffer_store_dword(&ctx->ac,
                                 ctx->esgs_ring,
                                 out_val, 1,
                                 NULL, ctx->es2gs_offset,
@@ -5892,21 +5890,21 @@ handle_ls_outputs_post(struct nir_to_llvm_context *ctx)
         if (!(ctx->output_mask & (1ull << i)))
             continue;
 
         if (i == VARYING_SLOT_CLIP_DIST0)
             length = ctx->num_output_clips + ctx->num_output_culls;
 
         int param = shader_io_get_unique_index(i);
         mark_tess_output(ctx, false, param);
         if (length > 4)
             mark_tess_output(ctx, false, param + 1);
         LLVMValueRef dw_addr = LLVMBuildAdd(ctx->builder, base_dw_addr,
-                            LLVMConstInt(ctx->i32, param * 4, false),
+                            LLVMConstInt(ctx->ac.i32, param * 4, false),
                             "");
         for (unsigned j = 0; j < length; j++) {
             ac_lds_store(&ctx->ac, dw_addr,
                      LLVMBuildLoad(ctx->builder, out_ptr[j], ""));
             dw_addr = LLVMBuildAdd(ctx->builder, dw_addr, ctx->ac.i32_1, "");
         }
     }
 }
 
 struct ac_build_if_state
@@ -6037,72 +6035,72 @@ write_tess_factors(struct nir_to_llvm_context *ctx)
                LLVMBuildICmp(ctx->builder, LLVMIntEQ, invocation_id, ctx->ac.i32_0, ""));
 
     tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
     tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
 
     mark_tess_output(ctx, true, tess_inner_index);
     mark_tess_output(ctx, true, tess_outer_index);
     lds_base = get_tcs_out_current_patch_data_offset(ctx);
     lds_inner = LLVMBuildAdd(ctx->builder, lds_base,
-                 LLVMConstInt(ctx->i32, tess_inner_index * 4, false), "");
+                 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
     lds_outer = LLVMBuildAdd(ctx->builder, lds_base,
-                 LLVMConstInt(ctx->i32, tess_outer_index * 4, false), "");
+                 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");
 
     for (i = 0; i < 4; i++) {
-        inner[i] = LLVMGetUndef(ctx->i32);
-        outer[i] = LLVMGetUndef(ctx->i32);
+        inner[i] = LLVMGetUndef(ctx->ac.i32);
+        outer[i] = LLVMGetUndef(ctx->ac.i32);
     }
 
     // LINES reverseal
     if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
         outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
         lds_outer = LLVMBuildAdd(ctx->builder, lds_outer,
-                     LLVMConstInt(ctx->i32, 1, false), "");
+                     LLVMConstInt(ctx->ac.i32, 1, false), "");
         outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
     } else {
         for (i = 0; i < outer_comps; i++) {
             outer[i] = out[i] = ac_lds_load(&ctx->ac, lds_outer);
             lds_outer = LLVMBuildAdd(ctx->builder, lds_outer,
-                         LLVMConstInt(ctx->i32, 1, false), "");
+                         LLVMConstInt(ctx->ac.i32, 1, false), "");
         }
         for (i = 0; i < inner_comps; i++) {
             inner[i] = out[outer_comps+i] = ac_lds_load(&ctx->ac, lds_inner);
             lds_inner = LLVMBuildAdd(ctx->builder, lds_inner,
-                         LLVMConstInt(ctx->i32, 1, false), "");
+                         LLVMConstInt(ctx->ac.i32, 1, false), "");
         }
     }
 
     /* Convert the outputs to vectors for stores. */
     vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
     vec1 = NULL;
 
     if (stride > 4)
         vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);
 
     buffer = ctx->hs_ring_tess_factor;
     tf_base = ctx->tess_factor_offset;
     byteoffset = LLVMBuildMul(ctx->builder, rel_patch_id,
-                  LLVMConstInt(ctx->i32, 4 * stride, false), "");
+                  LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
 
     unsigned tf_offset = 0;
 
     if (ctx->options->chip_class <= VI) {
         ac_nir_build_if(&inner_if_ctx, ctx,
                 LLVMBuildICmp(ctx->builder, LLVMIntEQ,
                           rel_patch_id, ctx->ac.i32_0, ""));
 
         /* Store the dynamic HS control word. */
         ac_build_buffer_store_dword(&ctx->ac, buffer,
-                        LLVMConstInt(ctx->i32, 0x80000000, false),
+                        LLVMConstInt(ctx->ac.i32, 0x80000000, false),
                         1, ctx->ac.i32_0, tf_base,
                         0, 1, 0, true, false);
         tf_offset += 4;
 
         ac_nir_build_endif(&inner_if_ctx);
     }
 
     /* Store the tessellation factors. */
     ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
                     MIN2(stride, 4), byteoffset, tf_base,
@@ -6113,32 +6111,32 @@ write_tess_factors(struct nir_to_llvm_context *ctx)
                         16 + tf_offset, 1, 0, true, false);
 
     //store to offchip for TES to read - only if TES reads them
     if (ctx->options->key.tcs.tes_reads_tess_factors) {
         LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
         LLVMValueRef tf_inner_offset;
         unsigned param_outer, param_inner;
 
         param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
         tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
-                                 LLVMConstInt(ctx->i32, param_outer, 0));
+                                 LLVMConstInt(ctx->ac.i32, param_outer, 0));
 
         outer_vec = ac_build_gather_values(&ctx->ac, outer,
                            util_next_power_of_two(outer_comps));
 
         ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
                         outer_comps, tf_outer_offset,
                         ctx->oc_lds, 0, 1, 0, true, false);
         if (inner_comps) {
             param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
             tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
-                                     LLVMConstInt(ctx->i32, param_inner, 0));
+                                     LLVMConstInt(ctx->ac.i32, param_inner, 0));
 
             inner_vec = inner_comps == 1 ? inner[0] :
                 ac_build_gather_values(&ctx->ac, inner, inner_comps);
             ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
                             inner_comps, tf_inner_offset,
                             ctx->oc_lds, 0, 1, 0, true, false);
         }
     }
     ac_nir_build_endif(&if_ctx);
 }
@@ -6362,43 +6360,43 @@ ac_nir_eliminate_const_vs_outputs(struct nir_to_llvm_context *ctx)
                        outinfo->vs_output_param_offset,
                        VARYING_SLOT_MAX,
                        &outinfo->param_exports);
 }
 
 static void
 ac_setup_rings(struct nir_to_llvm_context *ctx)
 {
     if ((ctx->stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_es) ||
         (ctx->stage == MESA_SHADER_TESS_EVAL && ctx->options->key.tes.as_es)) {
-        ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_ESGS_VS, false));
+        ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_VS, false));
     }
 
     if (ctx->is_gs_copy_shader) {
-        ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_GSVS_VS, false));
+        ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
     }
     if (ctx->stage == MESA_SHADER_GEOMETRY) {
         LLVMValueRef tmp;
-        ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_ESGS_GS, false));
-        ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_GSVS_GS, false));
+        ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_GS, false));
+        ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));
 
         ctx->gsvs_ring = LLVMBuildBitCast(ctx->builder, ctx->gsvs_ring, ctx->v4i32, "");
 
-        ctx->gsvs_ring = LLVMBuildInsertElement(ctx->builder, ctx->gsvs_ring, ctx->gsvs_num_entries, LLVMConstInt(ctx->i32, 2, false), "");
+        ctx->gsvs_ring = LLVMBuildInsertElement(ctx->builder, ctx->gsvs_ring, ctx->gsvs_num_entries, LLVMConstInt(ctx->ac.i32, 2, false), "");
         tmp = LLVMBuildExtractElement(ctx->builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
         tmp = LLVMBuildOr(ctx->builder, tmp, ctx->gsvs_ring_stride, "");
         ctx->gsvs_ring = LLVMBuildInsertElement(ctx->builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
     }
 
     if (ctx->stage == MESA_SHADER_TESS_CTRL ||
         ctx->stage == MESA_SHADER_TESS_EVAL) {
-        ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_HS_TESS_OFFCHIP, false));
-        ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_HS_TESS_FACTOR, false));
+        ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
+        ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
     }
 }
 
 static unsigned
 ac_nir_get_max_workgroup_size(enum chip_class chip_class,
                   const struct nir_shader *nir)
 {
     switch (nir->info.stage) {
     case MESA_SHADER_TESS_CTRL:
         return chip_class >= CIK ? 128 : 64;
@@ -6554,21 +6552,21 @@ LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
         ac_nir_fixup_ls_hs_input_vgprs(&ctx);
 
     for(int i = 0; i < shader_count; ++i) {
         ctx.stage = shaders[i]->info.stage;
         ctx.output_mask = 0;
         ctx.tess_outputs_written = 0;
         ctx.num_output_clips = shaders[i]->info.clip_distance_array_size;
         ctx.num_output_culls = shaders[i]->info.cull_distance_array_size;
 
         if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
-            ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.i32, "gs_next_vertex");
+            ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.ac.i32, "gs_next_vertex");
             ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
         } else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
             ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
         } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
             if (shader_info->info.vs.needs_instance_id) {
                 ctx.shader_info->vs.vgpr_comp_cnt =
                     MAX2(3, ctx.shader_info->vs.vgpr_comp_cnt);
             }
         } else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
@@ -6819,21 +6817,21 @@ void ac_compile_nir_shader(LLVMTargetMachineRef tm,
     ac_compile_llvm_module(tm, llvm_module, binary, config, shader_info, nir[0]->info.stage, dump_shader, options->supports_spill);
 
     for (int i = 0; i < nir_count; ++i)
         ac_fill_shader_info(shader_info, nir[i], options);
 }
 
 static void
 ac_gs_copy_shader_emit(struct nir_to_llvm_context *ctx)
 {
     LLVMValueRef args[9];
     args[0] = ctx->gsvs_ring;
-    args[1] = LLVMBuildMul(ctx->builder, ctx->abi.vertex_id, LLVMConstInt(ctx->i32, 4, false), "");
+    args[1] = LLVMBuildMul(ctx->builder, ctx->abi.vertex_id, LLVMConstInt(ctx->ac.i32, 4, false), "");
     args[3] = ctx->ac.i32_0;
     args[4] = ctx->ac.i32_1; /* OFFEN */
     args[5] = ctx->ac.i32_0; /* IDXEN */
     args[6] = ctx->ac.i32_1; /* GLC */
     args[7] = ctx->ac.i32_1; /* SLC */
     args[8] = ctx->ac.i32_0; /* TFE */
 
     int idx = 0;
     for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
@@ -6845,27 +6843,27 @@ ac_gs_copy_shader_emit(struct nir_to_llvm_context *ctx)
         if (i == VARYING_SLOT_CLIP_DIST0) {
             /* unpack clip and cull from a single set of slots */
             length = ctx->num_output_clips + ctx->num_output_culls;
             if (length > 4)
                 slot_inc = 2;
         }
 
         for (unsigned j = 0; j < length; j++) {
             LLVMValueRef value;
-            args[2] = LLVMConstInt(ctx->i32,
+            args[2] = LLVMConstInt(ctx->ac.i32,
                            (slot * 4 + j) *
                            ctx->gs_max_out_vertices * 16 * 4, false);
 
             value = ac_build_intrinsic(&ctx->ac,
                            "llvm.SI.buffer.load.dword.i32.i32",
-                           ctx->i32, args, 9,
+                           ctx->ac.i32, args, 9,
                            AC_FUNC_ATTR_READONLY |
                            AC_FUNC_ATTR_LEGACY);
 
             LLVMBuildStore(ctx->builder,
                        ac_to_float(&ctx->ac, value), ctx->nir->outputs[radeon_llvm_reg_index_soa(i, j)]);
         }
         idx += slot_inc;
     }
     handle_vs_outputs_post(ctx, false, &ctx->shader_info->vs.outinfo);
 }
-- 
2.14.3