Rename to INDEX_op_rems to emphasize signed inputs, mirroring INDEX_op_remu_*.
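
For a target without rems, tcg_gen_rem_i32/_i64 fall back to the signed-division
expansion r = a - (a / b) * b, as in the tcg-op.c hunks below. A minimal sketch of
the 32-bit fallback path (the mul/sub tail is outside the diff context, so this is
an assumption about the unchanged code rather than part of this patch):

    /* Fallback when INDEX_op_rems is unsupported but INDEX_op_divs is. */
    TCGv_i32 t0 = tcg_temp_ebb_new_i32();
    tcg_gen_op3_i32(INDEX_op_divs, t0, arg1, arg2);  /* t0 = arg1 / arg2 (signed) */
    tcg_gen_mul_i32(t0, t0, arg2);                   /* t0 = (arg1 / arg2) * arg2 */
    tcg_gen_sub_i32(ret, arg1, t0);                  /* ret = arg1 - t0 */
    tcg_temp_free_i32(t0);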
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 tcg/optimize.c           | 12 +++++++-----
 tcg/tcg-op.c             |  8 ++++----
 tcg/tcg.c                |  6 ++----
 tcg/tci.c                |  4 ++--
 docs/devel/tcg-ops.rst   |  2 +-
 tcg/tci/tcg-target.c.inc |  2 +-
 7 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index 61e5e185cc..040f4da835 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -56,6 +56,7 @@ DEF(nor, 1, 2, 0, TCG_OPF_INT)
 DEF(not, 1, 1, 0, TCG_OPF_INT)
 DEF(or, 1, 2, 0, TCG_OPF_INT)
 DEF(orc, 1, 2, 0, TCG_OPF_INT)
+DEF(rems, 1, 2, 0, TCG_OPF_INT)
 DEF(sub, 1, 2, 0, TCG_OPF_INT)
 DEF(xor, 1, 2, 0, TCG_OPF_INT)
 
@@ -72,7 +73,6 @@ DEF(st8_i32, 0, 2, 1, 0)
 DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)
 /* arith */
-DEF(rem_i32, 1, 2, 0, 0)
 DEF(remu_i32, 1, 2, 0, 0)
 /* shifts/rotates */
 DEF(shl_i32, 1, 2, 0, 0)
@@ -116,7 +116,6 @@ DEF(st16_i64, 0, 2, 1, 0)
 DEF(st32_i64, 0, 2, 1, 0)
 DEF(st_i64, 0, 2, 1, 0)
 /* arith */
-DEF(rem_i64, 1, 2, 0, 0)
 DEF(remu_i64, 1, 2, 0, 0)
 /* shifts/rotates */
 DEF(shl_i64, 1, 2, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 77386eacb7..55663ff4c3 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -557,12 +557,14 @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
         }
         return (uint64_t)x / ((uint64_t)y ? : 1);
 
-    case INDEX_op_rem_i32:
-        return (int32_t)x % ((int32_t)y ? : 1);
+    case INDEX_op_rems:
+        if (type == TCG_TYPE_I32) {
+            return (int32_t)x % ((int32_t)y ? : 1);
+        }
+        return (int64_t)x % ((int64_t)y ? : 1);
+
     case INDEX_op_remu_i32:
         return (uint32_t)x % ((uint32_t)y ? : 1);
-    case INDEX_op_rem_i64:
-        return (int64_t)x % ((int64_t)y ? : 1);
     case INDEX_op_remu_i64:
         return (uint64_t)x % ((uint64_t)y ? : 1);
 
@@ -3009,7 +3011,7 @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_qemu_st_i128:
             done = fold_qemu_st(&ctx, op);
             break;
-        CASE_OP_32_64(rem):
+        case INDEX_op_rems:
         CASE_OP_32_64(remu):
             done = fold_remainder(&ctx, op);
             break;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 7ed92157de..6da8b30547 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -615,8 +615,8 @@ void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 
 void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    if (TCG_TARGET_HAS_rem_i32) {
-        tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_rems, TCG_TYPE_I32, 0)) {
+        tcg_gen_op3_i32(INDEX_op_rems, ret, arg1, arg2);
     } else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) {
         TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_op3_i32(INDEX_op_divs, t0, arg1, arg2);
@@ -1983,8 +1983,8 @@ void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 
 void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
-    if (TCG_TARGET_HAS_rem_i64) {
-        tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_rems, TCG_TYPE_I64, 0)) {
+        tcg_gen_op3_i64(INDEX_op_rems, ret, arg1, arg2);
     } else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) {
         TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_op3_i64(INDEX_op_divs, t0, arg1, arg2);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 5b75106943..c740d310cd 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1040,8 +1040,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_not, TCGOutOpUnary, outop_not),
     OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
-    OUTOP(INDEX_op_rem_i32, TCGOutOpBinary, outop_rems),
-    OUTOP(INDEX_op_rem_i64, TCGOutOpBinary, outop_rems),
+    OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
     OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
     OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
 };
@@ -5419,8 +5418,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_nor:
     case INDEX_op_or:
     case INDEX_op_orc:
-    case INDEX_op_rem_i32:
-    case INDEX_op_rem_i64:
+    case INDEX_op_rems:
     case INDEX_op_xor:
         {
             const TCGOutOpBinary *out =
diff --git a/tcg/tci.c b/tcg/tci.c
index 65f493c3d4..6ca033f3be 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -728,7 +728,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
         break;
-    case INDEX_op_rem_i64:
+    case INDEX_op_rems:
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
         break;
@@ -1079,9 +1079,9 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_nor:
     case INDEX_op_or:
     case INDEX_op_orc:
+    case INDEX_op_rems:
     case INDEX_op_sub:
     case INDEX_op_xor:
-    case INDEX_op_rem_i64:
     case INDEX_op_remu_i32:
     case INDEX_op_remu_i64:
     case INDEX_op_shl_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index 8f3b5e91b2..1f4160a585 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -287,7 +287,7 @@ Arithmetic
      - | *t0* = *t1* / *t2* (unsigned)
        | Undefined behavior if division by zero.
 
-   * - rem_i32/i64 *t0*, *t1*, *t2*
+   * - rems *t0*, *t1*, *t2*
 
      - | *t0* = *t1* % *t2* (signed)
        | Undefined behavior if division by zero or overflow.
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index 4d9c142a00..2b05da7d06 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -757,7 +757,7 @@ static void tgen_rems(TCGContext *s, TCGType type,
 {
     TCGOpcode opc = (type == TCG_TYPE_I32
                      ? INDEX_op_tci_rems32
-                     : INDEX_op_rem_i64);
+                     : INDEX_op_rems);
     tcg_out_op_rrr(s, opc, a0, a1, a2);
 }
 
-- 
2.43.0