This gives us 32-bit immediate addends.

The new 'I' constraint accepts any signed 32-bit constant, matched
against the new RIL-format AFI/AGFI instructions; the new 'J'
constraint accepts any constant whose negation fits in a signed
32-bit immediate, so that subtraction of a constant can be emitted
as addition of the negated constant.

Signed-off-by: Richard Henderson <r...@twiddle.net>
---
 tcg/s390/tcg-target.c |   68 +++++++++++++++++++++++++++++++++++++-----------
 1 files changed, 52 insertions(+), 16 deletions(-)
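A note on the two constant ranges, as a minimal stand-alone sketch;
the helper names fits_s32/neg_fits_s32 and the test harness below are
illustrative, not part of the patch.  'I' accepts a value that is
itself a valid signed 32-bit addend, while 'J' accepts a value whose
negation is, since subtraction of a constant is emitted as addition
of the negated constant (tgen64_addi(s, dest, -val)).  The two sets
differ only at the edges, e.g. 0x80000000 vs INT32_MIN:

    #include <stdint.h>
    #include <stdio.h>

    /* 'I' (TCG_CT_CONST_S32): val itself fits a signed 32-bit field. */
    static int fits_s32(int64_t val)
    {
        return val == (int32_t)val;
    }

    /* 'J' (TCG_CT_CONST_N32): -val fits, so "sub r, val" can be
       rewritten as "add r, -val". */
    static int neg_fits_s32(int64_t val)
    {
        return -val == (int32_t)-val;
    }

    int main(void)
    {
        /* 0x80000000 fails 'I' but passes 'J': its negation is INT32_MIN. */
        printf("%d %d\n", fits_s32(0x80000000LL), neg_fits_s32(0x80000000LL));
        /* INT32_MIN passes 'I' but fails 'J': -INT32_MIN does not fit. */
        printf("%d %d\n", fits_s32(INT32_MIN), neg_fits_s32(INT32_MIN));
        return 0;
    }

Preferring AHI/AGHI in tgen32_addi/tgen64_addi when the addend fits
in 16 bits keeps the common case in the shorter RI format (4 bytes)
rather than the RIL format (6 bytes).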
diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index cf70cc2..caa2d0d 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -33,14 +33,16 @@
     do { } while (0)
 #endif
 
-#define TCG_CT_CONST_S16   0x100
-#define TCG_CT_CONST_U12   0x200
+#define TCG_CT_CONST_S32   0x100
+#define TCG_CT_CONST_N32   0x200
 
 /* All of the following instructions are prefixed with their instruction
    format, and are defined as 8- or 16-bit quantities, even when the two
    halves of the 16-bit quantity may appear 32 bits apart in the insn.
    This makes it easy to copy the values from the tables in Appendix B.  */
 typedef enum S390Opcode {
+    RIL_AFI     = 0xc209,
+    RIL_AGFI    = 0xc208,
     RIL_BRASL   = 0xc005,
     RIL_BRCL    = 0xc004,
     RIL_LARL    = 0xc000,
@@ -288,7 +290,11 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
         break;
     case 'I':
         ct->ct &= ~TCG_CT_REG;
-        ct->ct |= TCG_CT_CONST_S16;
+        ct->ct |= TCG_CT_CONST_S32;
+        break;
+    case 'J':
+        ct->ct &= ~TCG_CT_REG;
+        ct->ct |= TCG_CT_CONST_N32;
         break;
     default:
         break;
@@ -305,10 +311,12 @@ static inline int tcg_target_const_match(tcg_target_long val,
 {
     int ct = arg_ct->ct;
 
-    if ((ct & TCG_CT_CONST) ||
-        ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) ||
-        ((ct & TCG_CT_CONST_U12) && val == (val & 0xfff))) {
+    if (ct & TCG_CT_CONST) {
         return 1;
+    } else if (ct & TCG_CT_CONST_S32) {
+        return val == (int32_t)val;
+    } else if (ct & TCG_CT_CONST_N32) {
+        return -val == (int32_t)-val;
     }
 
     return 0;
@@ -529,6 +537,24 @@ static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
     tcg_out_insn(s, RRE, LLGFR, dest, src);
 }
 
+static inline void tgen32_addi(TCGContext *s, TCGReg dest, tcg_target_long val)
+{
+    if (val == (int16_t)val) {
+        tcg_out_insn(s, RI, AHI, dest, val);
+    } else {
+        tcg_out_insn(s, RIL, AFI, dest, val);
+    }
+}
+
+static inline void tgen64_addi(TCGContext *s, TCGReg dest, tcg_target_long val)
+{
+    if (val == (int16_t)val) {
+        tcg_out_insn(s, RI, AGHI, dest, val);
+    } else {
+        tcg_out_insn(s, RIL, AGFI, dest, val);
+    }
+}
+
 static void tgen32_cmp(TCGContext *s, TCGCond c, TCGReg r1, TCGReg r2)
 {
     if (c > TCG_COND_GT) {
@@ -974,22 +1000,32 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_add_i32:
         if (const_args[2]) {
-            tcg_out_insn(s, RI, AHI, args[0], args[2]);
+            tgen32_addi(s, args[0], args[2]);
         } else {
             tcg_out_insn(s, RR, AR, args[0], args[2]);
         }
         break;
-
     case INDEX_op_add_i64:
-        tcg_out_insn(s, RRE, AGR, args[0], args[2]);
+        if (const_args[2]) {
+            tgen64_addi(s, args[0], args[2]);
+        } else {
+            tcg_out_insn(s, RRE, AGR, args[0], args[2]);
+        }
         break;
 
     case INDEX_op_sub_i32:
-        tcg_out_insn(s, RR, SR, args[0], args[2]);
+        if (const_args[2]) {
+            tgen32_addi(s, args[0], -args[2]);
+        } else {
+            tcg_out_insn(s, RR, SR, args[0], args[2]);
+        }
         break;
-
     case INDEX_op_sub_i64:
-        tcg_out_insn(s, RRE, SGR, args[0], args[2]);
+        if (const_args[2]) {
+            tgen64_addi(s, args[0], -args[2]);
+        } else {
+            tcg_out_insn(s, RRE, SGR, args[0], args[2]);
+        }
         break;
 
     case INDEX_op_and_i32:
@@ -1254,8 +1290,8 @@ static const TCGTargetOpDef s390_op_defs[] = {
     { INDEX_op_st16_i32, { "r", "r" } },
     { INDEX_op_st_i32, { "r", "r" } },
 
-    { INDEX_op_add_i32, { "r", "0", "rI" } },
-    { INDEX_op_sub_i32, { "r", "0", "r" } },
+    { INDEX_op_add_i32, { "r", "0", "ri" } },
+    { INDEX_op_sub_i32, { "r", "0", "ri" } },
     { INDEX_op_mul_i32, { "r", "0", "r" } },
 
     { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
@@ -1315,8 +1351,8 @@ static const TCGTargetOpDef s390_op_defs[] = {
     { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },
 
-    { INDEX_op_add_i64, { "r", "0", "r" } },
-    { INDEX_op_sub_i64, { "r", "0", "r" } },
+    { INDEX_op_add_i64, { "r", "0", "rI" } },
+    { INDEX_op_sub_i64, { "r", "0", "rJ" } },
     { INDEX_op_mul_i64, { "r", "0", "r" } },
 
     { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
-- 
1.7.0.1