Add instructions of the SRR opcode format. Add helpers for add/sub_ssov. Signed-off-by: Bastian Koppelmann <kbast...@mail.uni-paderborn.de> --- v2 -> v3: - Replace ADD instructions with gen_add_i32 for PSW bit calculation. - Add gen_sub/mul_i32 for PSW bit calculation. - Replace SUB instructions with gen_sub_i32 for PSW bit calculation. - Replace MUL instruction with gen_mul_i32s for PSW bit calculation. - Fix PSW bit calculation in SSOV macro.
target-tricore/helper.h | 4 ++ target-tricore/op_helper.c | 39 ++++++++++++ target-tricore/translate.c | 144 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 187 insertions(+) diff --git a/target-tricore/helper.h b/target-tricore/helper.h index 5884240..299bd77 100644 --- a/target-tricore/helper.h +++ b/target-tricore/helper.h @@ -14,3 +14,7 @@ * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ + +/* Arithmetic */ +DEF_HELPER_3(add_ssov, i32, env, i32, i32) +DEF_HELPER_3(sub_ssov, i32, env, i32, i32) diff --git a/target-tricore/op_helper.c b/target-tricore/op_helper.c index 2e5981f..c6a1186 100644 --- a/target-tricore/op_helper.c +++ b/target-tricore/op_helper.c @@ -20,6 +20,45 @@ #include "exec/helper-proto.h" #include "exec/cpu_ldst.h" +#define SSOV(env, ret, arg, len) do { \ + int64_t max_pos = INT##len ##_MAX; \ + int64_t max_neg = INT##len ##_MIN; \ + if (arg > max_pos) { \ + env->PSW_USB_V = 1; \ + env->PSW_USB_SV = 1; \ + ret = (target_ulong)max_pos; \ + } else { \ + if (arg < max_neg) { \ + env->PSW_USB_V = 1; \ + env->PSW_USB_SV = 1; \ + ret = (target_ulong)max_neg; \ + } else { \ + env->PSW_USB_V = 0; \ + ret = (target_ulong)arg; \ + } \ + } \ + env->PSW_USB_AV = arg ^ arg * 2u; \ + env->PSW_USB_SAV |= env->PSW_USB_AV; \ +} while (0) + +target_ulong helper_add_ssov(CPUTRICOREState *env, target_ulong r1, + target_ulong r2) +{ + target_ulong ret; + int64_t result = (int64_t)r1 + (int64_t)r2; + SSOV(env, ret, result, 32); + return ret; +} + +target_ulong helper_sub_ssov(CPUTRICOREState *env, target_ulong r1, + target_ulong r2) +{ + target_ulong ret; + int64_t result = (int64_t)r1 - (int64_t)r2; + SSOV(env, ret, result, 32); + return ret; +} + static inline void QEMU_NORETURN do_raise_exception_err(CPUTRICOREState *env, uint32_t exception, int error_code, diff --git a/target-tricore/translate.c b/target-tricore/translate.c index d1e6669..faf5633 
100644 --- a/target-tricore/translate.c +++ b/target-tricore/translate.c @@ -150,6 +150,29 @@ static inline void gen_addi_i32(TCGv ret, TCGv r1, target_ulong r2) tcg_temp_free(temp); } +static inline void gen_sub_i32(TCGv ret, TCGv r1, TCGv r2) +{ + TCGv t0 = tcg_temp_new_i32(); + /* Subtraction and set V/SV bits */ + tcg_gen_movi_tl(t0, 0); + tcg_gen_sub2_tl(ret, cpu_PSW_V, r1, t0, r2, t0); + gen_calc_psw_sv_i32(cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + gen_calc_psw_av_i32(cpu_PSW_AV, ret); + gen_calc_psw_sav_i32(cpu_PSW_SAV, cpu_PSW_AV); + tcg_temp_free(t0); +} + +static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2) +{ + /* mul and set V/SV bits */ + tcg_gen_muls2_tl(ret, cpu_PSW_V, r1, r2); + gen_calc_psw_sv_i32(cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + gen_calc_psw_av_i32(cpu_PSW_AV, ret); + gen_calc_psw_sav_i32(cpu_PSW_SAV, cpu_PSW_AV); +} + #define OP_COND(insn)\ static inline void gen_cond_##insn(int cond, TCGv r1, TCGv r2, TCGv r3, \ TCGv r4) \ @@ -251,6 +274,16 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count) tcg_temp_free_i64(t_min); } +static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2) +{ + gen_helper_add_ssov(ret, cpu_env, r1, r2); +} + +static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2) +{ + gen_helper_sub_ssov(ret, cpu_env, r1, r2); +} + /* * Functions for decoding instructions */ @@ -324,6 +357,89 @@ static void decode_src_opc(DisasContext *ctx, int op1) } } +static void decode_srr_opc(DisasContext *ctx, int op1) +{ + int r1, r2; + TCGv temp; + + r1 = MASK_OP_SRR_S1D(ctx->opcode); + r2 = MASK_OP_SRR_S2(ctx->opcode); + + switch (op1) { + case OPC1_16_SRR_ADD: + gen_add_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_ADD_A15: + gen_add_i32(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_ADD_15A: + gen_add_i32(cpu_gpr_d[15], 
cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_ADD_A: + tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]); + break; + case OPC1_16_SRR_ADDS: + gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_AND: + tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_CMOV: + temp = tcg_const_tl(0); + tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + cpu_gpr_d[r2], cpu_gpr_d[r1]); + tcg_temp_free(temp); + break; + case OPC1_16_SRR_CMOVN: + temp = tcg_const_tl(0); + tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + cpu_gpr_d[r2], cpu_gpr_d[r1]); + tcg_temp_free(temp); + break; + case OPC1_16_SRR_EQ: + tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_LT: + tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_MOV: + tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_MOV_A: + tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_MOV_AA: + tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_a[r2]); + break; + case OPC1_16_SRR_MOV_D: + tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]); + break; + case OPC1_16_SRR_MUL: + gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_OR: + tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUB: + gen_sub_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUB_A15B: + gen_sub_i32(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUB_15AB: + gen_sub_i32(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUBS: + gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_XOR: + tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + } +} + static void decode_16Bit_opc(CPUTRICOREState *env, DisasContext *ctx) 
{ int op1; @@ -347,6 +467,30 @@ static void decode_16Bit_opc(CPUTRICOREState *env, DisasContext *ctx) case OPC1_16_SRC_SHA: decode_src_opc(ctx, op1); break; +/* SRR-format */ + case OPC1_16_SRR_ADD: + case OPC1_16_SRR_ADD_A15: + case OPC1_16_SRR_ADD_15A: + case OPC1_16_SRR_ADD_A: + case OPC1_16_SRR_ADDS: + case OPC1_16_SRR_AND: + case OPC1_16_SRR_CMOV: + case OPC1_16_SRR_CMOVN: + case OPC1_16_SRR_EQ: + case OPC1_16_SRR_LT: + case OPC1_16_SRR_MOV: + case OPC1_16_SRR_MOV_A: + case OPC1_16_SRR_MOV_AA: + case OPC1_16_SRR_MOV_D: + case OPC1_16_SRR_MUL: + case OPC1_16_SRR_OR: + case OPC1_16_SRR_SUB: + case OPC1_16_SRR_SUB_A15B: + case OPC1_16_SRR_SUB_15AB: + case OPC1_16_SRR_SUBS: + case OPC1_16_SRR_XOR: + decode_srr_opc(ctx, op1); + break; } } -- 2.0.4