Hi!

On 2024-03-22T14:15:36+0000, Andrew Stubbs <a...@baylibre.com> wrote:
> On 22/03/2024 08:43, Richard Biener wrote:
> Thanks, here's what I pushed.

> vect: more oversized bitmask fixups
>
> These patches fix up a failure in testcase vect/tsvc/vect-tsvc-s278.c when
> configured to use V32 instead of V64 (I plan to do this for RDNA devices).

Thanks, confirming that this "vect: more oversized bitmask fixups" does
fix the GCN target '-march=gfx1100' testing regression:

    PASS: gcc.dg/vect/tsvc/vect-tsvc-s278.c (test for excess errors)
    [-PASS:-]{+FAIL:+} gcc.dg/vect/tsvc/vect-tsvc-s278.c execution test
    XPASS: gcc.dg/vect/tsvc/vect-tsvc-s278.c scan-tree-dump vect "vectorized 1 loops"

    PASS: gcc.dg/vect/tsvc/vect-tsvc-s279.c (test for excess errors)
    [-PASS:-]{+FAIL:+} gcc.dg/vect/tsvc/vect-tsvc-s279.c execution test
    XPASS: gcc.dg/vect/tsvc/vect-tsvc-s279.c scan-tree-dump vect "vectorized 1 loops"

... that I saw introduced by "amdgcn: Prefer V32 on RDNA devices".

(The XPASSes are independent of that, pre-existing.)


Grüße
 Thomas


> The problem was that a "not" operation on the mask inadvertently enabled
> inactive lanes 31-63 and corrupted the output.  The fix is to adjust the mask
> when calling internal functions (in this case COND_MINUS), when doing masked
> loads and stores, and when doing conditional jumps (some cases were already
> handled).
>
> gcc/ChangeLog:
>
>       * dojump.cc (do_compare_rtx_and_jump): Clear excess bits in vector
>       bitmasks.
>       (do_compare_and_jump): Remove now-redundant similar code.
>       * internal-fn.cc (expand_fn_using_insn): Clear excess bits in vector
>       bitmasks.
>       (add_mask_and_len_args): Likewise.
>
> diff --git a/gcc/dojump.cc b/gcc/dojump.cc
> index 88600cb42d3..5f74b696b41 100644
> --- a/gcc/dojump.cc
> +++ b/gcc/dojump.cc
> @@ -1235,6 +1235,24 @@ do_compare_rtx_and_jump (rtx op0, rtx op1, enum rtx_code code, int unsignedp,
>           }
>       }
>  
> +      /* For boolean vectors with less than mode precision
> +      make sure to fill padding with consistent values.  */
> +      if (val
> +       && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (val))
> +       && SCALAR_INT_MODE_P (mode))
> +     {
> +       auto nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (val)).to_constant ();
> +       if (maybe_ne (GET_MODE_PRECISION (mode), nunits))
> +         {
> +           op0 = expand_binop (mode, and_optab, op0,
> +                               GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1),
> +                               NULL_RTX, true, OPTAB_WIDEN);
> +           op1 = expand_binop (mode, and_optab, op1,
> +                               GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1),
> +                               NULL_RTX, true, OPTAB_WIDEN);
> +         }
> +     }
> +
>        emit_cmp_and_jump_insns (op0, op1, code, size, mode, unsignedp, val,
>                              if_true_label, prob);
>      }
> @@ -1266,7 +1284,6 @@ do_compare_and_jump (tree treeop0, tree treeop1, enum rtx_code signed_code,
>    machine_mode mode;
>    int unsignedp;
>    enum rtx_code code;
> -  unsigned HOST_WIDE_INT nunits;
>  
>    /* Don't crash if the comparison was erroneous.  */
>    op0 = expand_normal (treeop0);
> @@ -1309,21 +1326,6 @@ do_compare_and_jump (tree treeop0, tree treeop1, enum rtx_code signed_code,
>        emit_insn (targetm.gen_canonicalize_funcptr_for_compare (new_op1, op1));
>        op1 = new_op1;
>      }
> -  /* For boolean vectors with less than mode precision
> -     make sure to fill padding with consistent values.  */
> -  else if (VECTOR_BOOLEAN_TYPE_P (type)
> -        && SCALAR_INT_MODE_P (mode)
> -        && TYPE_VECTOR_SUBPARTS (type).is_constant (&nunits)
> -        && maybe_ne (GET_MODE_PRECISION (mode), nunits))
> -    {
> -      gcc_assert (code == EQ || code == NE);
> -      op0 = expand_binop (mode, and_optab, op0,
> -                       GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1), NULL_RTX,
> -                       true, OPTAB_WIDEN);
> -      op1 = expand_binop (mode, and_optab, op1,
> -                       GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1), NULL_RTX,
> -                       true, OPTAB_WIDEN);
> -    }
>  
>    do_compare_rtx_and_jump (op0, op1, code, unsignedp, treeop0, mode,
>                          ((mode == BLKmode)
> diff --git a/gcc/internal-fn.cc b/gcc/internal-fn.cc
> index fcf47c7fa12..5269f0ac528 100644
> --- a/gcc/internal-fn.cc
> +++ b/gcc/internal-fn.cc
> @@ -245,6 +245,18 @@ expand_fn_using_insn (gcall *stmt, insn_code icode, unsigned int noutputs,
>              && SSA_NAME_IS_DEFAULT_DEF (rhs)
>              && VAR_P (SSA_NAME_VAR (rhs)))
>       create_undefined_input_operand (&ops[opno], TYPE_MODE (rhs_type));
> +      else if (VECTOR_BOOLEAN_TYPE_P (rhs_type)
> +            && SCALAR_INT_MODE_P (TYPE_MODE (rhs_type))
> +            && maybe_ne (GET_MODE_PRECISION (TYPE_MODE (rhs_type)),
> +                         TYPE_VECTOR_SUBPARTS (rhs_type).to_constant ()))
> +     {
> +       /* Ensure that the vector bitmasks do not have excess bits.  */
> +       int nunits = TYPE_VECTOR_SUBPARTS (rhs_type).to_constant ();
> +       rtx tmp = expand_binop (TYPE_MODE (rhs_type), and_optab, rhs_rtx,
> +                               GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1),
> +                               NULL_RTX, true, OPTAB_WIDEN);
> +       create_input_operand (&ops[opno], tmp, TYPE_MODE (rhs_type));
> +     }
>        else
>       create_input_operand (&ops[opno], rhs_rtx, TYPE_MODE (rhs_type));
>        opno += 1;
> @@ -312,6 +324,20 @@ add_mask_and_len_args (expand_operand *ops, unsigned int opno, gcall *stmt)
>      {
>        tree mask = gimple_call_arg (stmt, mask_index);
>        rtx mask_rtx = expand_normal (mask);
> +
> +      tree mask_type = TREE_TYPE (mask);
> +      if (VECTOR_BOOLEAN_TYPE_P (mask_type)
> +       && SCALAR_INT_MODE_P (TYPE_MODE (mask_type))
> +       && maybe_ne (GET_MODE_PRECISION (TYPE_MODE (mask_type)),
> +                    TYPE_VECTOR_SUBPARTS (mask_type).to_constant ()))
> +     {
> +       /* Ensure that the vector bitmasks do not have excess bits.  */
> +       int nunits = TYPE_VECTOR_SUBPARTS (mask_type).to_constant ();
> +       mask_rtx = expand_binop (TYPE_MODE (mask_type), and_optab, mask_rtx,
> +                                GEN_INT ((HOST_WIDE_INT_1U << nunits) - 1),
> +                                NULL_RTX, true, OPTAB_WIDEN);
> +     }
> +
>        create_input_operand (&ops[opno++], mask_rtx,
>                           TYPE_MODE (TREE_TYPE (mask)));
>      }

Reply via email to