The transformations with inverted immediates are now done generically and need not be handled by the backend.
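For context, the folding that now happens generically rests on simple bitwise identities: andc/orc/eqv with a constant second operand can be rewritten as and/or/xor with the inverted constant, which is exactly what the deleted backend code did by hand. The sketch below is a stand-alone illustration only, not QEMU/TCG code; the op_andc/op_orc/op_eqv helpers are made-up names for this example.

    /*
     * Illustrative only: the "inverted immediate" identities.
     * When the second operand c is a compile-time constant,
     *   andc x, c  ->  and x, ~c
     *   orc  x, c  ->  or  x, ~c
     *   eqv  x, c  ->  xor x, ~c
     * so the backend never has to handle the constant form itself.
     */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t op_andc(uint32_t a, uint32_t b) { return a & ~b; }
    static uint32_t op_orc (uint32_t a, uint32_t b) { return a | ~b; }
    static uint32_t op_eqv (uint32_t a, uint32_t b) { return ~(a ^ b); }

    int main(void)
    {
        uint32_t x = 0xdeadbeef;
        uint32_t c = 0x0000ffff;   /* immediate known at translate time */

        assert(op_andc(x, c) == (x & ~c));   /* and with ~c */
        assert(op_orc (x, c) == (x | ~c));   /* or  with ~c */
        assert(op_eqv (x, c) == (x ^ ~c));   /* xor with ~c */
        return 0;
    }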
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 32 +++++---------------------------
 1 file changed, 5 insertions(+), 27 deletions(-)

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 7f3829beeb..336b8a28ba 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -3070,36 +3070,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
     case INDEX_op_andc_i32:
-        a0 = args[0], a1 = args[1], a2 = args[2];
-        if (const_args[2]) {
-            tcg_out_andi32(s, a0, a1, ~a2);
-        } else {
-            tcg_out32(s, ANDC | SAB(a1, a0, a2));
-        }
-        break;
     case INDEX_op_andc_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
-        if (const_args[2]) {
-            tcg_out_andi64(s, a0, a1, ~a2);
-        } else {
-            tcg_out32(s, ANDC | SAB(a1, a0, a2));
-        }
+        tcg_out32(s, ANDC | SAB(args[1], args[0], args[2]));
         break;
     case INDEX_op_orc_i32:
-        if (const_args[2]) {
-            tcg_out_ori32(s, args[0], args[1], ~args[2]);
-            break;
-        }
-        /* FALLTHRU */
     case INDEX_op_orc_i64:
         tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
         break;
     case INDEX_op_eqv_i32:
-        if (const_args[2]) {
-            tcg_out_xori32(s, args[0], args[1], ~args[2]);
-            break;
-        }
-        /* FALLTHRU */
     case INDEX_op_eqv_i64:
         tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
         break;
@@ -4120,16 +4098,12 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_and_i32:
     case INDEX_op_or_i32:
     case INDEX_op_xor_i32:
-    case INDEX_op_andc_i32:
-    case INDEX_op_orc_i32:
-    case INDEX_op_eqv_i32:
     case INDEX_op_shl_i32:
     case INDEX_op_shr_i32:
     case INDEX_op_sar_i32:
     case INDEX_op_rotl_i32:
     case INDEX_op_rotr_i32:
     case INDEX_op_and_i64:
-    case INDEX_op_andc_i64:
     case INDEX_op_shl_i64:
     case INDEX_op_shr_i64:
     case INDEX_op_sar_i64:
@@ -4145,10 +4119,14 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_divu_i32:
     case INDEX_op_rem_i32:
     case INDEX_op_remu_i32:
+    case INDEX_op_andc_i32:
+    case INDEX_op_orc_i32:
+    case INDEX_op_eqv_i32:
     case INDEX_op_nand_i32:
     case INDEX_op_nor_i32:
     case INDEX_op_muluh_i32:
     case INDEX_op_mulsh_i32:
+    case INDEX_op_andc_i64:
     case INDEX_op_orc_i64:
     case INDEX_op_eqv_i64:
     case INDEX_op_nand_i64:
-- 
2.34.1