Adjust the softmmu TLB to use R0+R1, not any of the normally available registers. Since we handle overlap between inputs and helper arguments, we can allow any allocatable register.
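For reference, R0 and R1 are safe to use as TLB scratch registers because the backend already keeps both away from the register allocator. Roughly, paraphrasing the pre-existing reservations in tcg_target_init() (not something this patch adds; TCG_TMP0 is the backend's alias for TCG_REG_R1):

    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);    /* R1: backend scratch */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* R0: reads as "no register" in many insns */

With both reserved, an 'r'-class operand for qemu_ld/st can never alias the scratch pair that tcg_out_tlb_read() now clobbers, so the restricted 'L' class is no longer needed.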
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 tcg/s390x/tcg-target-con-set.h |  2 --
 tcg/s390x/tcg-target-con-str.h |  1 -
 tcg/s390x/tcg-target.c.inc     | 36 ++++++++++++----------------------
 3 files changed, 12 insertions(+), 27 deletions(-)

diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
index 15f1c55103..ecc079bb6d 100644
--- a/tcg/s390x/tcg-target-con-set.h
+++ b/tcg/s390x/tcg-target-con-set.h
@@ -10,12 +10,10 @@
  * tcg-target-con-str.h; the constraint combination is inclusive or.
  */
 C_O0_I1(r)
-C_O0_I2(L, L)
 C_O0_I2(r, r)
 C_O0_I2(r, ri)
 C_O0_I2(r, rA)
 C_O0_I2(v, r)
-C_O1_I1(r, L)
 C_O1_I1(r, r)
 C_O1_I1(v, r)
 C_O1_I1(v, v)
diff --git a/tcg/s390x/tcg-target-con-str.h b/tcg/s390x/tcg-target-con-str.h
index 6fa64a1ed6..25675b449e 100644
--- a/tcg/s390x/tcg-target-con-str.h
+++ b/tcg/s390x/tcg-target-con-str.h
@@ -9,7 +9,6 @@
  * REGS(letter, register_mask)
  */
 REGS('r', ALL_GENERAL_REGS)
-REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
 REGS('v', ALL_VECTOR_REGS)
 REGS('o', 0xaaaa) /* odd numbered general regs */
 
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index 42d3e13e08..a380982f86 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -44,18 +44,6 @@
 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16)
 #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
 
-/*
- * For softmmu, we need to avoid conflicts with the first 3
- * argument registers to perform the tlb lookup, and to call
- * the helper function.
- */
-#ifdef CONFIG_SOFTMMU
-#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_R2, 3)
-#else
-#define SOFTMMU_RESERVE_REGS 0
-#endif
-
-
 /* Several places within the instruction set 0 means "no register"
    rather than TCG_REG_R0. */
 #define TCG_REG_NONE 0
@@ -1734,10 +1722,10 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
     int ofs, a_off;
     uint64_t tlb_mask;
 
-    tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
+    tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
                  TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
-    tcg_out_insn(s, RXY, NG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, mask_off);
-    tcg_out_insn(s, RXY, AG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, table_off);
+    tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
+    tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
 
     /* For aligned accesses, we check the first byte and include the alignment
        bits within the address. For unaligned access, we check that we don't
@@ -1745,10 +1733,10 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
     a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
     tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
     if (a_off == 0) {
-        tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
+        tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
     } else {
-        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
-        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
+        tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
+        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R0, tlb_mask);
     }
 
     if (is_ld) {
@@ -1757,14 +1745,14 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
         ofs = offsetof(CPUTLBEntry, addr_write);
     }
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_insn(s, RX, C, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs);
+        tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
     } else {
-        tcg_out_insn(s, RXY, CG, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs);
+        tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
     }
 
-    tcg_out_insn(s, RXY, LG, TCG_REG_R2, TCG_REG_R2, TCG_REG_NONE,
+    tcg_out_insn(s, RXY, LG, TCG_TMP0, TCG_TMP0, TCG_REG_NONE,
                  offsetof(CPUTLBEntry, addend));
-    return TCG_REG_R2;
+    return TCG_TMP0;
 }
 
 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
@@ -3185,10 +3173,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
 
     case INDEX_op_qemu_ld_i32:
     case INDEX_op_qemu_ld_i64:
-        return C_O1_I1(r, L);
+        return C_O1_I1(r, r);
     case INDEX_op_qemu_st_i64:
     case INDEX_op_qemu_st_i32:
-        return C_O0_I2(L, L);
+        return C_O0_I2(r, r);
 
     case INDEX_op_deposit_i32:
     case INDEX_op_deposit_i64:
-- 
2.34.1