Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 include/tcg/tcg-opc.h    | 5 +----
 tcg/optimize.c           | 9 +++++----
 tcg/tcg-op.c             | 8 ++++----
 tcg/tcg.c                | 6 ++----
 tcg/tci.c                | 4 ++--
 docs/devel/tcg-ops.rst   | 2 +-
 tcg/tci/tcg-target.c.inc | 2 +-
 7 files changed, 16 insertions(+), 20 deletions(-)
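Illustration only, not part of the patch: the tcg/tcg-op.c hunks below keep the existing fallback that expands an unsigned remainder through divu when a backend does not implement remu (the quotient is then multiplied back and subtracted in the rest of the function, outside the hunk context). A minimal standalone sketch of that identity, with a helper name invented here for illustration:

#include <assert.h>
#include <stdint.h>

/*
 * remu(a, b) == a - (a / b) * b for unsigned a, b with b != 0:
 * the divu + mul + sub expansion used when INDEX_op_remu is
 * not available.
 */
static uint64_t remu_via_divu(uint64_t a, uint64_t b)
{
    uint64_t q = a / b;   /* divu */
    return a - q * b;     /* mul + sub */
}

int main(void)
{
    assert(remu_via_divu(7, 3) == 7 % 3);
    assert(remu_via_divu(UINT64_MAX, 10) == UINT64_MAX % 10);
    return 0;
}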
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index 040f4da835..ebb23347e9 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -57,6 +57,7 @@ DEF(not, 1, 1, 0, TCG_OPF_INT)
 DEF(or, 1, 2, 0, TCG_OPF_INT)
 DEF(orc, 1, 2, 0, TCG_OPF_INT)
 DEF(rems, 1, 2, 0, TCG_OPF_INT)
+DEF(remu, 1, 2, 0, TCG_OPF_INT)
 DEF(sub, 1, 2, 0, TCG_OPF_INT)
 DEF(xor, 1, 2, 0, TCG_OPF_INT)
 
@@ -72,8 +73,6 @@ DEF(ld_i32, 1, 1, 1, 0)
 DEF(st8_i32, 0, 2, 1, 0)
 DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)
-/* arith */
-DEF(remu_i32, 1, 2, 0, 0)
 /* shifts/rotates */
 DEF(shl_i32, 1, 2, 0, 0)
 DEF(shr_i32, 1, 2, 0, 0)
@@ -115,8 +114,6 @@ DEF(st8_i64, 0, 2, 1, 0)
 DEF(st16_i64, 0, 2, 1, 0)
 DEF(st32_i64, 0, 2, 1, 0)
 DEF(st_i64, 0, 2, 1, 0)
-/* arith */
-DEF(remu_i64, 1, 2, 0, 0)
 /* shifts/rotates */
 DEF(shl_i64, 1, 2, 0, 0)
 DEF(shr_i64, 1, 2, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 55663ff4c3..6f0887f808 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -563,9 +563,10 @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
         }
         return (int64_t)x % ((int64_t)y ? : 1);
 
-    case INDEX_op_remu_i32:
-        return (uint32_t)x % ((uint32_t)y ? : 1);
-    case INDEX_op_remu_i64:
+    case INDEX_op_remu:
+        if (type == TCG_TYPE_I32) {
+            return (uint32_t)x % ((uint32_t)y ? : 1);
+        }
         return (uint64_t)x % ((uint64_t)y ? : 1);
 
     default:
@@ -3012,7 +3013,7 @@ void tcg_optimize(TCGContext *s)
             done = fold_qemu_st(&ctx, op);
             break;
         case INDEX_op_rems:
-        CASE_OP_32_64(remu):
+        case INDEX_op_remu:
             done = fold_remainder(&ctx, op);
             break;
         CASE_OP_32_64(rotl):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 4ff6c9f0ab..0f1e83a49f 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -649,8 +649,8 @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 
 void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    if (tcg_op_supported(INDEX_op_remu_i32, TCG_TYPE_I32, 0)) {
-        tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_remu, TCG_TYPE_I32, 0)) {
+        tcg_gen_op3_i32(INDEX_op_remu, ret, arg1, arg2);
     } else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) {
         TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_op3_i32(INDEX_op_divu, t0, arg1, arg2);
@@ -2017,8 +2017,8 @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 
 void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
-    if (tcg_op_supported(INDEX_op_remu_i64, TCG_TYPE_I64, 0)) {
-        tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_remu, TCG_TYPE_I64, 0)) {
+        tcg_gen_op3_i64(INDEX_op_remu, ret, arg1, arg2);
     } else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) {
         TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_op3_i64(INDEX_op_divu, t0, arg1, arg2);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 41371823d4..12418d1aa6 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1041,8 +1041,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
     OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
-    OUTOP(INDEX_op_remu_i32, TCGOutOpBinary, outop_remu),
-    OUTOP(INDEX_op_remu_i64, TCGOutOpBinary, outop_remu),
+    OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
     OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
     OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
 };
@@ -5417,8 +5416,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_or:
     case INDEX_op_orc:
     case INDEX_op_rems:
-    case INDEX_op_remu_i32:
-    case INDEX_op_remu_i64:
+    case INDEX_op_remu:
     case INDEX_op_xor:
         {
             const TCGOutOpBinary *out =
diff --git a/tcg/tci.c b/tcg/tci.c
index bd5817a382..5d2cba4941 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -732,7 +732,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
             break;
-        case INDEX_op_remu_i64:
+        case INDEX_op_remu:
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
             break;
@@ -1080,9 +1080,9 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_or:
     case INDEX_op_orc:
     case INDEX_op_rems:
+    case INDEX_op_remu:
     case INDEX_op_sub:
     case INDEX_op_xor:
-    case INDEX_op_remu_i64:
     case INDEX_op_shl_i32:
     case INDEX_op_shl_i64:
     case INDEX_op_shr_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index 1f4160a585..bceecb0596 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -292,7 +292,7 @@ Arithmetic
      - | *t0* = *t1* % *t2* (signed)
        | Undefined behavior if division by zero or overflow.
 
-   * - remu_i32/i64 *t0*, *t1*, *t2*
+   * - remu *t0*, *t1*, *t2*
 
      - | *t0* = *t1* % *t2* (unsigned)
        | Undefined behavior if division by zero.
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index 421a2a8ac7..eb30fd04ba 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -769,7 +769,7 @@ static void tgen_remu(TCGContext *s, TCGType type,
 {
     TCGOpcode opc = (type == TCG_TYPE_I32
                      ? INDEX_op_tci_remu32
-                     : INDEX_op_remu_i64);
+                     : INDEX_op_remu);
     tcg_out_op_rrr(s, opc, a0, a1, a2);
 }
 
-- 
2.43.0