Take the operation size from the MemOp instead of a separate parameter.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 target/sparc/translate.c | 138 +++++++++++++++++++++++----------------
 1 file changed, 80 insertions(+), 58 deletions(-)
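As review context, not part of the patch: the refactor relies on the byte
sizes 4/8/16 being powers of two, so ctz32() maps them to the MO_* size
codes and the inner helpers can recover the size from the MemOp itself.
A minimal standalone sketch follows; the enum values mirror QEMU's
include/exec/memop.h, while size_to_memop() is a hypothetical stand-in
for the "MemOp sz = ctz32(size)" computation in the wrappers:

    #include <assert.h>

    /* Subset of QEMU's MemOp encoding (include/exec/memop.h). */
    typedef enum {
        MO_8    = 0,
        MO_16   = 1,
        MO_32   = 2,
        MO_64   = 3,
        MO_128  = 4,
        MO_SIZE = 0x07,     /* mask covering the size field */
    } MemOp;

    /* Hypothetical helper: 4 -> MO_32, 8 -> MO_64, 16 -> MO_128,
       i.e. what ctz32(size) computes in gen_ldf_asi()/gen_stf_asi(). */
    static MemOp size_to_memop(int bytes)
    {
        return (MemOp)__builtin_ctz(bytes);
    }

    int main(void)
    {
        assert(size_to_memop(4) == MO_32);
        assert(size_to_memop(8) == MO_64);
        assert(size_to_memop(16) == MO_128);

        /* What gen_ldf_asi0()/gen_stf_asi0() do instead of taking a
           separate size parameter: mask it back out of the MemOp. */
        MemOp memop = MO_128;             /* stand-in for da->memop */
        assert((memop & MO_SIZE) == MO_128);
        return 0;
    }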
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 07bd3c123a..5a385024ce 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -2305,35 +2305,41 @@ static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
     }
 }
 
-static void __attribute__((unused))
-gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
+static void gen_ldf_asi0(DisasContext *dc, DisasASI *da, MemOp orig_size,
+                         TCGv addr, int rd)
 {
-    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
+    MemOp memop = da->memop;
+    MemOp size = memop & MO_SIZE;
     TCGv_i32 d32;
     TCGv_i64 d64;
 
-    switch (da.type) {
+    /* TODO: Use 128-bit load/store below. */
+    if (size == MO_128) {
+        memop = (memop & ~MO_SIZE) | MO_64;
+    }
+
+    switch (da->type) {
     case GET_ASI_EXCP:
         break;
 
     case GET_ASI_DIRECT:
-        gen_address_mask(dc, addr);
+        memop |= MO_ALIGN_4;
         switch (size) {
-        case 4:
+        case MO_32:
             d32 = gen_dest_fpr_F(dc);
-            tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
+            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
             gen_store_fpr_F(dc, rd, d32);
             break;
-        case 8:
-            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
-                                da.memop | MO_ALIGN_4);
+
+        case MO_64:
+            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
             break;
-        case 16:
+
+        case MO_128:
             d64 = tcg_temp_new_i64();
-            tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
+            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
             tcg_gen_addi_tl(addr, addr, 8);
-            tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
-                                da.memop | MO_ALIGN_4);
+            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr, da->mem_idx, memop);
             tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
             break;
         default:
@@ -2343,24 +2349,19 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 
     case GET_ASI_BLOCK:
         /* Valid for lddfa on aligned registers only. */
-        if (size == 8 && (rd & 7) == 0) {
-            MemOp memop;
+        if (orig_size == MO_64 && (rd & 7) == 0) {
             TCGv eight;
             int i;
 
-            gen_address_mask(dc, addr);
-
-            /* The first operation checks required alignment.  */
-            memop = da.memop | MO_ALIGN_64;
             eight = tcg_constant_tl(8);
             for (i = 0; ; ++i) {
-                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
-                                    da.mem_idx, memop);
+                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
+                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                 if (i == 7) {
                     break;
                 }
                 tcg_gen_add_tl(addr, addr, eight);
-                memop = da.memop;
             }
         } else {
             gen_exception(dc, TT_ILL_INSN);
@@ -2369,10 +2370,9 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 
     case GET_ASI_SHORT:
         /* Valid for lddfa only.  */
-        if (size == 8) {
-            gen_address_mask(dc, addr);
-            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
-                                da.memop | MO_ALIGN);
+        if (orig_size == MO_64) {
+            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+                                memop | MO_ALIGN);
         } else {
             gen_exception(dc, TT_ILL_INSN);
         }
@@ -2380,8 +2380,8 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 
     default:
         {
-            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-            TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
+            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
 
             save_state(dc);
             /* According to the table in the UA2011 manual, the only
@@ -2389,21 +2389,23 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
                the NO_FAULT asis.  We still need a helper for these,
                but we can just use the integer asi helper for them.  */
             switch (size) {
-            case 4:
+            case MO_32:
                 d64 = tcg_temp_new_i64();
                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                 d32 = gen_dest_fpr_F(dc);
                 tcg_gen_extrl_i64_i32(d32, d64);
                 gen_store_fpr_F(dc, rd, d32);
                 break;
-            case 8:
-                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, r_asi, r_mop);
+            case MO_64:
+                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
+                                  r_asi, r_mop);
                 break;
-            case 16:
+            case MO_128:
                 d64 = tcg_temp_new_i64();
                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                 tcg_gen_addi_tl(addr, addr, 8);
-                gen_helper_ld_asi(cpu_fpr[rd/2+1], tcg_env, addr, r_asi, r_mop);
+                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr,
+                                  r_asi, r_mop);
                 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                 break;
             default:
@@ -2415,36 +2417,52 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 }
 
 static void __attribute__((unused))
-gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
+gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 {
-    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
+    MemOp sz = ctz32(size);
+    DisasASI da = get_asi(dc, insn, MO_TE | sz);
+
+    gen_address_mask(dc, addr);
+    gen_ldf_asi0(dc, &da, sz, addr, rd);
+}
+
+static void gen_stf_asi0(DisasContext *dc, DisasASI *da, MemOp orig_size,
+                         TCGv addr, int rd)
+{
+    MemOp memop = da->memop;
+    MemOp size = memop & MO_SIZE;
     TCGv_i32 d32;
 
-    switch (da.type) {
+    /* TODO: Use 128-bit load/store below. */
+    if (size == MO_128) {
+        memop = (memop & ~MO_SIZE) | MO_64;
+    }
+
+    switch (da->type) {
     case GET_ASI_EXCP:
         break;
 
     case GET_ASI_DIRECT:
-        gen_address_mask(dc, addr);
+        memop |= MO_ALIGN_4;
        switch (size) {
-        case 4:
+        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
-            tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
+            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
-        case 8:
-            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
-                                da.memop | MO_ALIGN_4);
+        case MO_64:
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+                                memop | MO_ALIGN_4);
            break;
-        case 16:
+        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
-            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
-                                da.memop | MO_ALIGN_16);
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+                                memop | MO_ALIGN_16);
            tcg_gen_addi_tl(addr, addr, 8);
-            tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
@@ -2453,24 +2471,19 @@ gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 
     case GET_ASI_BLOCK:
         /* Valid for stdfa on aligned registers only. */
-        if (size == 8 && (rd & 7) == 0) {
-            MemOp memop;
+        if (orig_size == MO_64 && (rd & 7) == 0) {
             TCGv eight;
             int i;
 
-            gen_address_mask(dc, addr);
-
-            /* The first operation checks required alignment.  */
-            memop = da.memop | MO_ALIGN_64;
             eight = tcg_constant_tl(8);
             for (i = 0; ; ++i) {
-                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
-                                    da.mem_idx, memop);
+                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
+                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                 if (i == 7) {
                     break;
                 }
                 tcg_gen_add_tl(addr, addr, eight);
-                memop = da.memop;
             }
         } else {
             gen_exception(dc, TT_ILL_INSN);
@@ -2479,10 +2492,9 @@ gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 
     case GET_ASI_SHORT:
         /* Valid for stdfa only.  */
-        if (size == 8) {
-            gen_address_mask(dc, addr);
-            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
-                                da.memop | MO_ALIGN);
+        if (orig_size == MO_64) {
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+                                memop | MO_ALIGN);
         } else {
             gen_exception(dc, TT_ILL_INSN);
         }
@@ -2497,6 +2509,16 @@ gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
     }
 }
 
+static void __attribute__((unused))
+gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
+{
+    MemOp sz = ctz32(size);
+    DisasASI da = get_asi(dc, insn, MO_TE | sz);
+
+    gen_address_mask(dc, addr);
+    gen_stf_asi0(dc, &da, sz, addr, rd);
+}
+
 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
 {
     TCGv hi = gen_dest_gpr(dc, rd);
-- 
2.34.1