The transformations with inverted immediates are now done generically and need not be handled by the backend.
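For reference, the identities the generic fold relies on, as a minimal standalone sketch (illustrative only, not part of this patch; the helper names here are made up):

    /* Sanity check of the inverted-immediate identities: with a
     * constant second operand, andc/orc/eqv reduce to and/or/xor
     * with the inverted constant, so the backend only ever sees
     * the register-register forms (NCRK, OCRK, NXRK, ...).
     */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t andc(uint32_t a, uint32_t b) { return a & ~b; }
    static uint32_t orc(uint32_t a, uint32_t b)  { return a | ~b; }
    static uint32_t eqv(uint32_t a, uint32_t b)  { return ~(a ^ b); }

    int main(void)
    {
        uint32_t x = 0xdeadbeef, c = 0x00ff00ff;

        assert(andc(x, c) == (x & ~c));  /* andc with imm -> and with ~imm */
        assert(orc(x, c)  == (x | ~c));  /* orc with imm  -> or with ~imm  */
        assert(eqv(x, c)  == (x ^ ~c));  /* eqv with imm  -> xor with ~imm */
        return 0;
    }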
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 tcg/s390x/tcg-target.c.inc | 56 ++++++----------------------------------
 1 file changed, 8 insertions(+), 48 deletions(-)

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index ad587325fc..b9a3e6e56a 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -2216,31 +2216,13 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_andc_i32:
-        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
-        if (const_args[2]) {
-            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
-            tgen_andi(s, TCG_TYPE_I32, a0, (uint32_t)~a2);
-        } else {
-            tcg_out_insn(s, RRFa, NCRK, a0, a1, a2);
-        }
+        tcg_out_insn(s, RRFa, NCRK, args[0], args[1], args[2]);
         break;
     case INDEX_op_orc_i32:
-        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
-        if (const_args[2]) {
-            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
-            tgen_ori(s, a0, (uint32_t)~a2);
-        } else {
-            tcg_out_insn(s, RRFa, OCRK, a0, a1, a2);
-        }
+        tcg_out_insn(s, RRFa, OCRK, args[0], args[1], args[2]);
         break;
     case INDEX_op_eqv_i32:
-        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
-        if (const_args[2]) {
-            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
-            tcg_out_insn(s, RIL, XILF, a0, ~a2);
-        } else {
-            tcg_out_insn(s, RRFa, NXRK, a0, a1, a2);
-        }
+        tcg_out_insn(s, RRFa, NXRK, args[0], args[1], args[2]);
         break;
     case INDEX_op_nand_i32:
         tcg_out_insn(s, RRFa, NNRK, args[0], args[1], args[2]);
@@ -2517,31 +2499,13 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_andc_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
-        if (const_args[2]) {
-            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
-            tgen_andi(s, TCG_TYPE_I64, a0, ~a2);
-        } else {
-            tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2);
-        }
+        tcg_out_insn(s, RRFa, NCGRK, args[0], args[1], args[2]);
         break;
     case INDEX_op_orc_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
-        if (const_args[2]) {
-            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
-            tgen_ori(s, a0, ~a2);
-        } else {
-            tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2);
-        }
+        tcg_out_insn(s, RRFa, OCGRK, args[0], args[1], args[2]);
         break;
     case INDEX_op_eqv_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
-        if (const_args[2]) {
-            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
-            tgen_xori(s, a0, ~a2);
-        } else {
-            tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2);
-        }
+        tcg_out_insn(s, RRFa, NXGRK, args[0], args[1], args[2]);
         break;
     case INDEX_op_nand_i64:
         tcg_out_insn(s, RRFa, NNGRK, args[0], args[1], args[2]);
@@ -3244,15 +3208,11 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
         return C_O1_I2(r, r, rK);
 
     case INDEX_op_andc_i32:
-    case INDEX_op_orc_i32:
-    case INDEX_op_eqv_i32:
-        return C_O1_I2(r, r, ri);
     case INDEX_op_andc_i64:
-        return C_O1_I2(r, r, rKR);
+    case INDEX_op_orc_i32:
     case INDEX_op_orc_i64:
+    case INDEX_op_eqv_i32:
     case INDEX_op_eqv_i64:
-        return C_O1_I2(r, r, rNK);
-
     case INDEX_op_nand_i32:
     case INDEX_op_nand_i64:
     case INDEX_op_nor_i32:
-- 
2.34.1