---
 src/amd/common/ac_nir_to_llvm.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/src/amd/common/ac_nir_to_llvm.c b/src/amd/common/ac_nir_to_llvm.c
index 6a638e3f16..40c856224a 100644
--- a/src/amd/common/ac_nir_to_llvm.c
+++ b/src/amd/common/ac_nir_to_llvm.c
@@ -127,21 +127,20 @@ struct nir_to_llvm_context {
        LLVMValueRef esgs_ring;
        LLVMValueRef gsvs_ring;
        LLVMValueRef hs_ring_tess_offchip;
        LLVMValueRef hs_ring_tess_factor;
 
        LLVMValueRef prim_mask;
        LLVMValueRef sample_pos_offset;
        LLVMValueRef persp_sample, persp_center, persp_centroid;
        LLVMValueRef linear_sample, linear_center, linear_centroid;
 
-       LLVMTypeRef v4i32;
        LLVMTypeRef v8i32;
        LLVMTypeRef f64;
        LLVMTypeRef f32;
        LLVMTypeRef f16;
        LLVMTypeRef v2f32;
        LLVMTypeRef v4f32;
 
        unsigned uniform_md_kind;
        LLVMValueRef empty_md;
        gl_shader_stage stage;
@@ -677,21 +676,21 @@ radv_define_common_user_sgprs_phase2(struct nir_to_llvm_context *ctx,
 
 static void
 radv_define_vs_user_sgprs_phase1(struct nir_to_llvm_context *ctx,
                                  gl_shader_stage stage,
                                  bool has_previous_stage,
                                  gl_shader_stage previous_stage,
                                  struct arg_info *args)
 {
        if (!ctx->is_gs_copy_shader && (stage == MESA_SHADER_VERTEX || (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
                if (ctx->shader_info->info.vs.has_vertex_buffers)
-                       add_user_sgpr_argument(args, const_array(ctx->v4i32, 16), &ctx->vertex_buffers); /* vertex buffers */
+                       add_user_sgpr_argument(args, const_array(ctx->ac.v4i32, 16), &ctx->vertex_buffers); /* vertex buffers */
                add_user_sgpr_argument(args, ctx->ac.i32, &ctx->abi.base_vertex); // base vertex
                add_user_sgpr_argument(args, ctx->ac.i32, &ctx->abi.start_instance);// start instance
                if (ctx->shader_info->info.vs.needs_draw_id)
                        add_user_sgpr_argument(args, ctx->ac.i32, &ctx->abi.draw_id); // draw id
        }
 }
 
 static void
 radv_define_vs_user_sgprs_phase2(struct nir_to_llvm_context *ctx,
                                  gl_shader_stage stage,
@@ -718,21 +717,21 @@ static void create_function(struct nir_to_llvm_context *ctx,
                             gl_shader_stage previous_stage)
 {
        uint8_t user_sgpr_idx;
        struct user_sgpr_info user_sgpr_info;
        struct arg_info args = {};
        LLVMValueRef desc_sets;
 
        allocate_user_sgprs(ctx, &user_sgpr_info);
 
        if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
-               add_user_sgpr_argument(&args, const_array(ctx->v4i32, 16), &ctx->ring_offsets); /* address of rings */
+               add_user_sgpr_argument(&args, const_array(ctx->ac.v4i32, 16), &ctx->ring_offsets); /* address of rings */
        }
 
        switch (stage) {
        case MESA_SHADER_COMPUTE:
                radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
                if (ctx->shader_info->info.cs.grid_components_used)
                        add_user_sgpr_argument(&args, LLVMVectorType(ctx->ac.i32, ctx->shader_info->info.cs.grid_components_used), &ctx->num_work_groups); /* grid size */
                add_sgpr_argument(&args, ctx->ac.v3i32, &ctx->workgroup_ids);
                add_sgpr_argument(&args, ctx->ac.i32, &ctx->tg_size);
                add_vgpr_argument(&args, ctx->ac.v3i32, &ctx->local_invocation_ids);
@@ -915,21 +914,21 @@ static void create_function(struct nir_to_llvm_context *ctx,
 
        user_sgpr_idx = 0;
 
        if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
                set_userdata_location_shader(ctx, AC_UD_SCRATCH_RING_OFFSETS, &user_sgpr_idx, 2);
                if (ctx->options->supports_spill) {
                        ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
                                                               LLVMPointerType(ctx->ac.i8, CONST_ADDR_SPACE),
                                                               NULL, 0, AC_FUNC_ATTR_READNONE);
                        ctx->ring_offsets = LLVMBuildBitCast(ctx->builder, ctx->ring_offsets,
-                                                            const_array(ctx->v4i32, 16), "");
+                                                            const_array(ctx->ac.v4i32, 16), "");
                }
        }
        
        /* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
         * the rw_buffers at s0/s1. With user SGPR0 = s8, lets restart the count from 0 */
        if (has_previous_stage)
                user_sgpr_idx = 0;
 
        radv_define_common_user_sgprs_phase2(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, desc_sets, &user_sgpr_idx);
 
@@ -983,21 +982,20 @@ static void create_function(struct nir_to_llvm_context *ctx,
                break;
        default:
                unreachable("Shader stage not implemented");
        }
 
        ctx->shader_info->num_user_sgprs = user_sgpr_idx;
 }
 
 static void setup_types(struct nir_to_llvm_context *ctx)
 {
-       ctx->v4i32 = LLVMVectorType(ctx->ac.i32, 4);
        ctx->v8i32 = LLVMVectorType(ctx->ac.i32, 8);
        ctx->f32 = LLVMFloatTypeInContext(ctx->context);
        ctx->f16 = LLVMHalfTypeInContext(ctx->context);
        ctx->f64 = LLVMDoubleTypeInContext(ctx->context);
        ctx->v2f32 = LLVMVectorType(ctx->f32, 2);
        ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
 
        ctx->uniform_md_kind =
            LLVMGetMDKindIDInContext(ctx->context, "amdgpu.uniform", 14);
        ctx->empty_md = LLVMMDNodeInContext(ctx->context, NULL, 0);
@@ -2229,21 +2227,21 @@ static LLVMValueRef visit_vulkan_resource_index(struct nir_to_llvm_context *ctx,
                base_offset = pipeline_layout->push_constant_size + 16 * idx;
                stride = LLVMConstInt(ctx->ac.i32, 16, false);
        } else
                stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);
 
        offset = LLVMConstInt(ctx->ac.i32, base_offset, false);
        index = LLVMBuildMul(ctx->builder, index, stride, "");
        offset = LLVMBuildAdd(ctx->builder, offset, index, "");
        
        desc_ptr = ac_build_gep0(&ctx->ac, desc_ptr, offset);
-       desc_ptr = cast_ptr(ctx, desc_ptr, ctx->v4i32);
+       desc_ptr = cast_ptr(ctx, desc_ptr, ctx->ac.v4i32);
        LLVMSetMetadata(desc_ptr, ctx->uniform_md_kind, ctx->empty_md);
 
        return LLVMBuildLoad(ctx->builder, desc_ptr, "");
 }
 
 static LLVMValueRef visit_load_push_constant(struct nir_to_llvm_context *ctx,
                                              nir_intrinsic_instr *instr)
 {
        LLVMValueRef ptr, addr;
 
@@ -4255,28 +4253,28 @@ static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
        case AC_DESC_IMAGE:
                type = ctx->v8i32;
                type_size = 32;
                break;
        case AC_DESC_FMASK:
                type = ctx->v8i32;
                offset += 32;
                type_size = 32;
                break;
        case AC_DESC_SAMPLER:
-               type = ctx->v4i32;
+               type = ctx->ac.v4i32;
                if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
                        offset += 64;
 
                type_size = 16;
                break;
        case AC_DESC_BUFFER:
-               type = ctx->v4i32;
+               type = ctx->ac.v4i32;
                type_size = 16;
                break;
        default:
                unreachable("invalid desc_type\n");
        }
 
        offset += constant_index * stride;
 
        if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
            (!index || binding->immutable_samplers_equal)) {
@@ -6357,21 +6355,21 @@ ac_setup_rings(struct nir_to_llvm_context *ctx)
        }
 
        if (ctx->is_gs_copy_shader) {
                ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
        }
        if (ctx->stage == MESA_SHADER_GEOMETRY) {
                LLVMValueRef tmp;
                ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_GS, false));
                ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));
 
-               ctx->gsvs_ring = LLVMBuildBitCast(ctx->builder, ctx->gsvs_ring, ctx->v4i32, "");
+               ctx->gsvs_ring = LLVMBuildBitCast(ctx->builder, ctx->gsvs_ring, ctx->ac.v4i32, "");
 
                ctx->gsvs_ring = LLVMBuildInsertElement(ctx->builder, ctx->gsvs_ring, ctx->gsvs_num_entries, LLVMConstInt(ctx->ac.i32, 2, false), "");
                tmp = LLVMBuildExtractElement(ctx->builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
                tmp = LLVMBuildOr(ctx->builder, tmp, ctx->gsvs_ring_stride, "");
                ctx->gsvs_ring = LLVMBuildInsertElement(ctx->builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
        }
 
        if (ctx->stage == MESA_SHADER_TESS_CTRL ||
            ctx->stage == MESA_SHADER_TESS_EVAL) {
                ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
-- 
2.14.3
