As of this commit, all uses of const sources either go through a
nir_src_as_<type> helper, which handles bit sizes correctly, or are
accompanied by a nir_src_bit_size() == 32 assertion to check that we
have the size we think we have.
---
 src/intel/compiler/brw_fs_nir.cpp | 200 ++++++++++++++----------------
 1 file changed, 90 insertions(+), 110 deletions(-)
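
(Note for reviewers, not part of the commit message: the conversion
pattern used throughout this patch is roughly the sketch below.  The
wrapper function and its fallback value of 0 are made up for
illustration; only the nir_src_is_const()/nir_src_as_uint() calls are
the real API.)

   #include "nir.h"

   /* Old pattern: fetch the raw nir_const_value and index a union
    * member by hand.  Reading u32[0] from a source that is really
    * 8-, 16- or 64-bit returns the wrong value with no diagnostic:
    *
    *    nir_const_value *val = nir_src_as_const_value(src);
    *    assert(val != NULL);
    *    unsigned index = val->u32[0];
    */

   /* New pattern: nir_src_is_const() replaces the NULL check, and
    * nir_src_as_uint() (which itself asserts the source is a constant
    * scalar) converts from whatever bit size the source actually has,
    * returning a uint64_t.
    */
   static unsigned
   example_read_index(nir_src src)
   {
      return nir_src_is_const(src) ? nir_src_as_uint(src) : 0;
   }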

diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index f85ced96067..e5a7e7887a9 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -520,8 +520,7 @@ fs_visitor::optimize_extract_to_float(nir_alu_instr *instr,
        src0->op != nir_op_extract_i8 && src0->op != nir_op_extract_i16)
       return false;
 
-   nir_const_value *element = nir_src_as_const_value(src0->src[1].src);
-   assert(element != NULL);
+   unsigned element = nir_src_as_uint(src0->src[1].src);
 
    /* Element type to extract.*/
    const brw_reg_type type = brw_int_type(
@@ -535,7 +534,7 @@ fs_visitor::optimize_extract_to_float(nir_alu_instr *instr,
    op0 = offset(op0, bld, src0->src[0].swizzle[0]);
 
    set_saturate(instr->dest.saturate,
-                bld.MOV(result, subscript(op0, type, element->u32[0])));
+                bld.MOV(result, subscript(op0, type, element)));
    return true;
 }
 
@@ -553,14 +552,18 @@ fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
    if (src0->intrinsic != nir_intrinsic_load_front_face)
       return false;
 
-   nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
-   if (!value1 || fabsf(value1->f32[0]) != 1.0f)
+   if (!nir_src_is_const(instr->src[1].src) ||
+       !nir_src_is_const(instr->src[2].src))
       return false;
 
-   nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
-   if (!value2 || fabsf(value2->f32[0]) != 1.0f)
+   const float value1 = nir_src_as_float(instr->src[1].src);
+   const float value2 = nir_src_as_float(instr->src[2].src);
+   if (fabsf(value1) != 1.0f || fabsf(value2) != 1.0f)
       return false;
 
+   /* nir_opt_algebraic should have gotten rid of bcsel(b, a, a) */
+   assert(value1 == -value2);
+
    fs_reg tmp = vgrf(glsl_type::int_type);
 
    if (devinfo->gen >= 6) {
@@ -578,7 +581,7 @@ fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
        * surely be TRIANGLES
        */
 
-      if (value1->f32[0] == -1.0f) {
+      if (value1 == -1.0f) {
          g0.negate = true;
       }
 
@@ -599,7 +602,7 @@ fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
        * surely be TRIANGLES
        */
 
-      if (value1->f32[0] == -1.0f) {
+      if (value1 == -1.0f) {
          g1_6.negate = true;
       }
 
@@ -1514,8 +1517,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
 
    case nir_op_extract_u8:
    case nir_op_extract_i8: {
-      nir_const_value *byte = nir_src_as_const_value(instr->src[1].src);
-      assert(byte != NULL);
+      unsigned byte = nir_src_as_uint(instr->src[1].src);
 
       /* The PRMs say:
        *
@@ -1529,15 +1531,15 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
          if (instr->op == nir_op_extract_i8) {
             /* If we need to sign extend, extract to a word first */
             fs_reg w_temp = bld.vgrf(BRW_REGISTER_TYPE_W);
-            bld.MOV(w_temp, subscript(op[0], type, byte->u32[0]));
+            bld.MOV(w_temp, subscript(op[0], type, byte));
             bld.MOV(result, w_temp);
          } else {
             /* Otherwise use an AND with 0xff and a word type */
-            bld.AND(result, subscript(op[0], type, byte->u32[0] / 2), brw_imm_uw(0xff));
+            bld.AND(result, subscript(op[0], type, byte / 2), brw_imm_uw(0xff));
          }
       } else {
          const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
-         bld.MOV(result, subscript(op[0], type, byte->u32[0]));
+         bld.MOV(result, subscript(op[0], type, byte));
       }
       break;
    }
@@ -1545,9 +1547,8 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
    case nir_op_extract_u16:
    case nir_op_extract_i16: {
       const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i16);
-      nir_const_value *word = nir_src_as_const_value(instr->src[1].src);
-      assert(word != NULL);
-      bld.MOV(result, subscript(op[0], type, word->u32[0]));
+      unsigned word = nir_src_as_uint(instr->src[1].src);
+      bld.MOV(result, subscript(op[0], type, word));
       break;
    }
 
@@ -1658,9 +1659,9 @@ fs_visitor::get_nir_src(const nir_src &src)
 fs_reg
 fs_visitor::get_nir_src_imm(const nir_src &src)
 {
-   nir_const_value *val = nir_src_as_const_value(src);
    assert(nir_src_bit_size(src) == 32);
-   return val ? fs_reg(brw_imm_d(val->i32[0])) : get_nir_src(src);
+   return nir_src_is_const(src) ?
+          fs_reg(brw_imm_d(nir_src_as_int(src))) : get_nir_src(src);
 }
 
 fs_reg
@@ -2058,19 +2059,16 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst,
                                unsigned first_component)
 {
    struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
-
-   nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
-   nir_const_value *offset_const = nir_src_as_const_value(offset_src);
    const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
 
    /* TODO: figure out push input layout for invocations == 1 */
    /* TODO: make this work with 64-bit inputs */
    if (gs_prog_data->invocations == 1 &&
        type_sz(dst.type) <= 4 &&
-       offset_const != NULL && vertex_const != NULL &&
-       4 * (base_offset + offset_const->u32[0]) < push_reg_count) {
-      int imm_offset = (base_offset + offset_const->u32[0]) * 4 +
-                       vertex_const->u32[0] * push_reg_count;
+       nir_src_is_const(offset_src) && nir_src_is_const(vertex_src) &&
+       4 * (base_offset + nir_src_as_uint(offset_src)) < push_reg_count) {
+      int imm_offset = (base_offset + nir_src_as_uint(offset_src)) * 4 +
+                       nir_src_as_uint(vertex_src) * push_reg_count;
       for (unsigned i = 0; i < num_components; i++) {
          bld.MOV(offset(dst, bld, i),
                  fs_reg(ATTR, imm_offset + i + first_component, dst.type));
@@ -2085,10 +2083,10 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst,
    fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
 
    if (gs_prog_data->invocations == 1) {
-      if (vertex_const) {
+      if (nir_src_is_const(vertex_src)) {
          /* The vertex index is constant; just select the proper URB handle. */
          icp_handle =
-            retype(brw_vec8_grf(first_icp_handle + vertex_const->i32[0], 0),
+            retype(brw_vec8_grf(first_icp_handle + nir_src_as_uint(vertex_src), 0),
                    BRW_REGISTER_TYPE_UD);
       } else {
          /* The vertex index is non-constant.  We need to use indirect
@@ -2129,12 +2127,11 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst,
    } else {
       assert(gs_prog_data->invocations > 1);
 
-      if (vertex_const) {
-         assert(devinfo->gen >= 9 || vertex_const->i32[0] <= 5);
+      if (nir_src_is_const(vertex_src)) {
+         unsigned vertex = nir_src_as_uint(vertex_src);
+         assert(devinfo->gen >= 9 || vertex <= 5);
          bld.MOV(icp_handle,
-                 retype(brw_vec1_grf(first_icp_handle +
-                                     vertex_const->i32[0] / 8,
-                                     vertex_const->i32[0] % 8),
+                 retype(brw_vec1_grf(first_icp_handle + vertex / 8, vertex % 8),
                         BRW_REGISTER_TYPE_UD));
       } else {
          /* The vertex index is non-constant.  We need to use indirect
@@ -2178,7 +2175,7 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst,
    }
 
    for (unsigned iter = 0; iter < num_iterations; iter++) {
-      if (offset_const) {
+      if (nir_src_is_const(offset_src)) {
          /* Constant indexing - use global offset. */
          if (first_component != 0) {
             unsigned read_components = num_components + first_component;
@@ -2196,7 +2193,7 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst,
             inst->size_written = num_components *
                                  tmp_dst.component_size(inst->exec_size);
          }
-         inst->offset = base_offset + offset_const->u32[0];
+         inst->offset = base_offset + nir_src_as_uint(offset_src);
          inst->mlen = 1;
       } else {
          /* Indirect indexing - use per-slot offsets as well. */
@@ -2234,7 +2231,7 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst,
 
       if (num_iterations > 1) {
          num_components = orig_num_components - 2;
-         if(offset_const) {
+         if(nir_src_is_const(offset_src)) {
             base_offset++;
          } else {
             fs_reg new_indirect = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
@@ -2249,14 +2246,13 @@ fs_reg
 fs_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
 {
    nir_src *offset_src = nir_get_io_offset_src(instr);
-   nir_const_value *const_value = nir_src_as_const_value(*offset_src);
 
-   if (const_value) {
+   if (nir_src_is_const(*offset_src)) {
       /* The only constant offset we should find is 0.  brw_nir.c's
        * add_const_offset_to_base() will fold other constant offsets
        * into instr->const_index[0].
        */
-      assert(const_value->u32[0] == 0);
+      assert(nir_src_as_uint(*offset_src) == 0);
       return fs_reg();
    }
 
@@ -2384,9 +2380,7 @@ fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
       unsigned first_component = nir_intrinsic_component(instr);
       unsigned num_components = instr->num_components;
 
-      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
-      assert(const_offset && "Indirect input loads not allowed");
-      src = offset(src, bld, const_offset->u32[0]);
+      src = offset(src, bld, nir_src_as_uint(instr->src[0]));
 
       if (type_sz(dest.type) == 8)
          first_component /= 2;
@@ -2473,18 +2467,17 @@ fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
       unsigned imm_offset = instr->const_index[0];
 
       const nir_src &vertex_src = instr->src[0];
-      nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
 
       fs_inst *inst;
 
       fs_reg icp_handle;
 
-      if (vertex_const) {
+      if (nir_src_is_const(vertex_src)) {
          /* Emit a MOV to resolve <0,1,0> regioning. */
          icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
+         unsigned vertex = nir_src_as_uint(vertex_src);
          bld.MOV(icp_handle,
-                 retype(brw_vec1_grf(1 + (vertex_const->i32[0] >> 3),
-                                     vertex_const->i32[0] & 7),
+                 retype(brw_vec1_grf(1 + (vertex >> 3), vertex & 7),
                         BRW_REGISTER_TYPE_UD));
       } else if (tcs_prog_data->instances == 1 &&
                  vertex_src.is_ssa &&
@@ -3198,10 +3191,9 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
 
    case nir_intrinsic_store_output: {
       const fs_reg src = get_nir_src(instr->src[0]);
-      const nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
-      assert(const_offset && "Indirect output stores not allowed");
+      const unsigned store_offset = nir_src_as_uint(instr->src[1]);
       const unsigned location = nir_intrinsic_base(instr) +
-         SET_FIELD(const_offset->u32[0], BRW_NIR_FRAG_OUTPUT_LOCATION);
+         SET_FIELD(store_offset, BRW_NIR_FRAG_OUTPUT_LOCATION);
       const fs_reg new_dest = retype(alloc_frag_output(this, location),
                                      src.type);
 
@@ -3216,9 +3208,8 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
       const unsigned l = GET_FIELD(nir_intrinsic_base(instr),
                                    BRW_NIR_FRAG_OUTPUT_LOCATION);
       assert(l >= FRAG_RESULT_DATA0);
-      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
-      assert(const_offset && "Indirect output loads not allowed");
-      const unsigned target = l - FRAG_RESULT_DATA0 + const_offset->u32[0];
+      const unsigned load_offset = nir_src_as_uint(instr->src[0]);
+      const unsigned target = l - FRAG_RESULT_DATA0 + load_offset;
       const fs_reg tmp = bld.vgrf(dest.type, 4);
 
       if (reinterpret_cast<const brw_wm_prog_key *>(key)->coherent_fb_fetch)
@@ -3307,10 +3298,8 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
       const glsl_interp_mode interpolation =
          (enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
 
-      nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
-
-      if (const_sample) {
-         unsigned msg_data = const_sample->i32[0] << 4;
+      if (nir_src_is_const(instr->src[0])) {
+         unsigned msg_data = nir_src_as_uint(instr->src[0]) << 4;
 
          emit_pixel_interpolater_send(bld,
                                       FS_OPCODE_INTERPOLATE_AT_SAMPLE,
@@ -3379,6 +3368,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
       nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
 
       if (const_offset) {
+         assert(nir_src_bit_size(instr->src[0]) == 32);
          unsigned off_x = MIN2((int)(const_offset->f32[0] * 16), 7) & 0xf;
          unsigned off_y = MIN2((int)(const_offset->f32[1] * 16), 7) & 0xf;
 
@@ -3483,12 +3473,11 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
 static int
 get_op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src)
 {
-   const nir_const_value *const val = nir_src_as_const_value(instr->src[src]);
-
-   if (val != NULL) {
-      if (val->i32[0] == 1)
+   if (nir_src_is_const(instr->src[src])) {
+      int64_t add_val = nir_src_as_int(instr->src[src]);
+      if (add_val == 1)
          return BRW_AOP_INC;
-      else if (val->i32[0] == -1)
+      else if (add_val == -1)
          return BRW_AOP_DEC;
    }
 
@@ -3597,9 +3586,9 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
 
       /* Get the offset to read from */
       fs_reg offset_reg;
-      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
-      if (const_offset) {
-         offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
+      if (nir_src_is_const(instr->src[0])) {
+         offset_reg = brw_imm_ud(instr->const_index[0] +
+                                 nir_src_as_uint(instr->src[0]));
       } else {
          offset_reg = vgrf(glsl_type::uint_type);
          bld.ADD(offset_reg,
@@ -3657,9 +3646,9 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
             length = MIN2(2, length);
 
          fs_reg offset_reg;
-         nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
-         if (const_offset) {
-            offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0] +
+         if (nir_src_is_const(instr->src[1])) {
+            offset_reg = brw_imm_ud(instr->const_index[0] +
+                                    nir_src_as_uint(instr->src[1]) +
                                     type_size * first_component);
          } else {
             offset_reg = vgrf(glsl_type::uint_type);
@@ -3777,13 +3766,11 @@ fs_visitor::get_nir_ssbo_intrinsic_index(const brw::fs_builder &bld,
 {
    /* SSBO stores are weird in that their index is in src[1] */
    const unsigned src = instr->intrinsic == nir_intrinsic_store_ssbo ? 1 : 0;
-   nir_const_value *const_uniform_block =
-      nir_src_as_const_value(instr->src[src]);
 
    fs_reg surf_index;
-   if (const_uniform_block) {
+   if (nir_src_is_const(instr->src[src])) {
       unsigned index = stage_prog_data->binding_table.ssbo_start +
-                       const_uniform_block->u32[0];
+                       nir_src_as_uint(instr->src[src]);
       surf_index = brw_imm_ud(index);
       brw_mark_surface_used(prog_data, index);
    } else {
@@ -4021,13 +4008,13 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
 
       fs_reg src(UNIFORM, instr->const_index[0] / 4, dest.type);
 
-      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
-      if (const_offset) {
-         assert(const_offset->u32[0] % type_sz(dest.type) == 0);
+      if (nir_src_is_const(instr->src[0])) {
+         unsigned load_offset = nir_src_as_uint(instr->src[0]);
+         assert(load_offset % type_sz(dest.type) == 0);
          /* For 16-bit types we add the module of the const_index[0]
           * offset to access to not 32-bit aligned element
           */
-         src.offset = const_offset->u32[0] + instr->const_index[0] % 4;
+         src.offset = load_offset + instr->const_index[0] % 4;
 
          for (unsigned j = 0; j < instr->num_components; j++) {
             bld.MOV(offset(dest, bld, j), offset(src, bld, j));
@@ -4077,12 +4064,10 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
    }
 
    case nir_intrinsic_load_ubo: {
-      nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
       fs_reg surf_index;
-
-      if (const_index) {
+      if (nir_src_is_const(instr->src[0])) {
          const unsigned index = stage_prog_data->binding_table.ubo_start +
-                                const_index->u32[0];
+                                nir_src_as_uint(instr->src[0]);
          surf_index = brw_imm_ud(index);
          brw_mark_surface_used(prog_data, index);
       } else {
@@ -4103,8 +4088,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
                                nir->info.num_ubos - 1);
       }
 
-      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
-      if (const_offset == NULL) {
+      if (!nir_src_is_const(instr->src[1])) {
          fs_reg base_offset = retype(get_nir_src(instr->src[1]),
                                      BRW_REGISTER_TYPE_UD);
 
@@ -4121,11 +4105,12 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
           * and we have to split it if necessary.
           */
          const unsigned type_size = type_sz(dest.type);
+         const unsigned load_offset = nir_src_as_uint(instr->src[1]);
 
          /* See if we've selected this as a push constant candidate */
-         if (const_index) {
-            const unsigned ubo_block = const_index->u32[0];
-            const unsigned offset_256b = const_offset->u32[0] / 32;
+         if (nir_src_is_const(instr->src[0])) {
+            const unsigned ubo_block = nir_src_as_uint(instr->src[0]);
+            const unsigned offset_256b = load_offset / 32;
 
             fs_reg push_reg;
             for (int i = 0; i < 4; i++) {
@@ -4135,7 +4120,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
                    offset_256b < range->start + range->length) {
 
                   push_reg = fs_reg(UNIFORM, UBO_START + i, dest.type);
-                  push_reg.offset = const_offset->u32[0] - 32 * range->start;
+                  push_reg.offset = load_offset - 32 * range->start;
                   break;
                }
             }
@@ -4154,7 +4139,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
          const fs_reg packed_consts = ubld.vgrf(BRW_REGISTER_TYPE_UD);
 
          for (unsigned c = 0; c < instr->num_components;) {
-            const unsigned base = const_offset->u32[0] + c * type_size;
+            const unsigned base = load_offset + c * type_size;
             /* Number of usable components in the next block-aligned load. */
             const unsigned count = MIN2(instr->num_components - c,
                                         (block_sz - base % block_sz) / type_size);
@@ -4222,8 +4207,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
          unsigned num_components = ffs(~(writemask >> first_component)) - 1;
          fs_reg write_src = offset(val_reg, bld, first_component);
 
-         nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
-
          if (type_size > 4) {
             /* We can't write more than 2 64-bit components at once. Limit
             * the num_components of the write to what we can do and let the next
@@ -4249,8 +4232,9 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
              * write message (the last component).
              */
 
-            if ( !const_offset || ((const_offset->u32[0] +
-                                   type_size * first_component) % 4)) {
+            if (!nir_src_is_const(instr->src[2]) ||
+                ((nir_src_as_uint(instr->src[2]) +
+                  type_size * first_component) % 4)) {
                /* If we use a .yz writemask we also need to emit 2
                 * byte-scattered write messages because of y-component not
                 * being aligned to 32-bit.
@@ -4277,8 +4261,8 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
 
          fs_reg offset_reg;
 
-         if (const_offset) {
-            offset_reg = brw_imm_ud(const_offset->u32[0] +
+         if (nir_src_is_const(instr->src[2])) {
+            offset_reg = brw_imm_ud(nir_src_as_uint(instr->src[2]) +
                                     type_size * first_component);
          } else {
             offset_reg = vgrf(glsl_type::uint_type);
@@ -4322,9 +4306,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
    case nir_intrinsic_store_output: {
       fs_reg src = get_nir_src(instr->src[0]);
 
-      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
-      assert(const_offset && "Indirect output stores not allowed");
-
+      unsigned store_offset = nir_src_as_uint(instr->src[1]);
       unsigned num_components = instr->num_components;
       unsigned first_component = nir_intrinsic_component(instr);
       if (nir_src_bit_size(instr->src[0]) == 64) {
@@ -4333,7 +4315,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       }
 
       fs_reg new_dest = retype(offset(outputs[instr->const_index[0]], bld,
-                                      4 * const_offset->u32[0]), src.type);
+                                      4 * store_offset), src.type);
       for (unsigned j = 0; j < num_components; j++) {
          bld.MOV(offset(new_dest, bld, j + first_component),
                  offset(src, bld, j));
@@ -4382,8 +4364,8 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       break;
 
    case nir_intrinsic_get_buffer_size: {
-      nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
-      unsigned ssbo_index = const_uniform_block ? const_uniform_block->u32[0] : 0;
+      unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
+                            nir_src_as_uint(instr->src[0]) : 0;
 
       /* A resinfo's sampler message is used to get the buffer size.  The
        * SIMD8's writeback message consists of four registers and SIMD16's
@@ -4619,11 +4601,10 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
 
    case nir_intrinsic_quad_broadcast: {
       const fs_reg value = get_nir_src(instr->src[0]);
-      nir_const_value *index = nir_src_as_const_value(instr->src[1]);
-      assert(nir_src_bit_size(instr->src[1]) == 32);
+      const unsigned index = nir_src_as_uint(instr->src[1]);
 
       bld.emit(SHADER_OPCODE_CLUSTER_BROADCAST, retype(dest, value.type),
-               value, brw_imm_ud(index->u32[0]), brw_imm_ud(4));
+               value, brw_imm_ud(index), brw_imm_ud(4));
       break;
    }
 
@@ -4899,9 +4880,9 @@ fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
       data2 = get_nir_src(instr->src[2]);
 
    /* Get the offset */
-   nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
-   if (const_offset) {
-      offset = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
+   if (nir_src_is_const(instr->src[0])) {
+      offset = brw_imm_ud(instr->const_index[0] +
+                          nir_src_as_uint(instr->src[0]));
    } else {
       offset = vgrf(glsl_type::uint_type);
       bld.ADD(offset,
@@ -4936,9 +4917,9 @@ fs_visitor::nir_emit_shared_atomic_float(const fs_builder &bld,
       data2 = get_nir_src(instr->src[2]);
 
    /* Get the offset */
-   nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
-   if (const_offset) {
-      offset = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
+   if (nir_src_is_const(instr->src[0])) {
+      offset = brw_imm_ud(instr->const_index[0] +
+                          nir_src_as_uint(instr->src[0]));
    } else {
       offset = vgrf(glsl_type::uint_type);
       bld.ADD(offset,
@@ -5028,6 +5009,7 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
       case nir_tex_src_offset: {
          nir_const_value *const_offset =
             nir_src_as_const_value(instr->src[i].src);
+         assert(nir_src_bit_size(instr->src[i].src) == 32);
          unsigned offset_bits = 0;
          if (const_offset &&
              brw_texture_offset(const_offset->i32,
@@ -5075,9 +5057,7 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
          break;
 
       case nir_tex_src_plane: {
-         nir_const_value *const_plane =
-            nir_src_as_const_value(instr->src[i].src);
-         const uint32_t plane = const_plane->u32[0];
+         const uint32_t plane = nir_src_as_uint(instr->src[i].src);
          const uint32_t texture_index =
             instr->texture_index +
             stage_prog_data->binding_table.plane_start[plane] -
-- 
2.19.1
