On 17 February 2018 at 18:23, Richard Henderson
<richard.hender...@linaro.org> wrote:
> Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
> ---
>  target/arm/translate-sve.c | 101 +++++++++++++++++++++++++++++++++++++++++++++
>  target/arm/sve.decode      |   6 +++
>  2 files changed, 107 insertions(+)
>
> diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
> index b000a2482e..9c724980a0 100644
> --- a/target/arm/translate-sve.c
> +++ b/target/arm/translate-sve.c
> @@ -3501,6 +3501,95 @@ static void do_ldr(DisasContext *s, uint32_t vofs, uint32_t len,
>      tcg_temp_free_i64(t0);
>  }
>
> +/* Similarly for stores.  */
> +static void do_str(DisasContext *s, uint32_t vofs, uint32_t len,
> +                   int rn, int imm)
> +{
> +    uint32_t len_align = QEMU_ALIGN_DOWN(len, 8);
> +    uint32_t len_remain = len % 8;
> +    uint32_t nparts = len / 8 + ctpop8(len_remain);
> +    int midx = get_mem_index(s);
> +    TCGv_i64 addr, t0;
> +
> +    addr = tcg_temp_new_i64();
> +    t0 = tcg_temp_new_i64();
> +
> +    /* Note that unpredicated load/store of vector/predicate registers
> +     * are defined as a stream of bytes, which equates to little-endian
> +     * operations on larger quantities.  There is no nice way to force
> +     * a little-endian load for aarch64_be-linux-user out of line.

"store" in this case, I assume.

> +     *
> +     * Attempt to keep code expansion to a minimum by limiting the
> +     * amount of unrolling done.
> +     */
> +    if (nparts <= 4) {
> +        int i;
> +
> +        for (i = 0; i < len_align; i += 8) {
> +            tcg_gen_ld_i64(t0, cpu_env, vofs + i);
> +            tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + i);
> +            tcg_gen_qemu_st_i64(t0, addr, midx, MO_LEQ);
> +        }
> +    } else {
> +        TCGLabel *loop = gen_new_label();
> +        TCGv_ptr i = TCGV_NAT_TO_PTR(glue(tcg_const_local_, ptr)(0));
> +        TCGv_ptr src;
> +
> +        gen_set_label(loop);
> +
> +        src = tcg_temp_new_ptr();
> +        tcg_gen_add_ptr(src, cpu_env, i);
> +        tcg_gen_ld_i64(t0, src, vofs);
> +
> +        /* Minimize the number of local temps that must be re-read from
> +         * the stack each iteration.  Instead, re-compute values other
> +         * than the loop counter.
> +         */
> +        tcg_gen_addi_ptr(src, i, imm);
> +#if UINTPTR_MAX == UINT32_MAX
> +        tcg_gen_extu_i32_i64(addr, TCGV_PTR_TO_NAT(src));
> +        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, rn));
> +#else
> +        tcg_gen_add_i64(addr, TCGV_PTR_TO_NAT(src), cpu_reg_sp(s, rn));
> +#endif

We should be able to avoid the ifdef with more support for tcg_*_ptr
ops (similar to an earlier patch in the series).
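A small helper would let the ifdef live in one place rather than at
every call site. A rough sketch of what I mean (untested, and the
helper name is my invention, not an existing tcg interface):

    /* Widen a TCGv_ptr to a TCGv_i64: zero-extend on a 32-bit host,
     * plain move on a 64-bit host.
     */
    static inline void tcg_gen_ext_ptr_i64(TCGv_i64 ret, TCGv_ptr v)
    {
    #if UINTPTR_MAX == UINT32_MAX
        tcg_gen_extu_i32_i64(ret, TCGV_PTR_TO_NAT(v));
    #else
        tcg_gen_mov_i64(ret, TCGV_PTR_TO_NAT(v));
    #endif
    }

and then the code above becomes host-independent:

        tcg_gen_ext_ptr_i64(addr, src);
        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, rn));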
"store" in this case, I assume. > + * > + * Attempt to keep code expansion to a minimum by limiting the > + * amount of unrolling done. > + */ > + if (nparts <= 4) { > + int i; > + > + for (i = 0; i < len_align; i += 8) { > + tcg_gen_ld_i64(t0, cpu_env, vofs + i); > + tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + i); > + tcg_gen_qemu_st_i64(t0, addr, midx, MO_LEQ); > + } > + } else { > + TCGLabel *loop = gen_new_label(); > + TCGv_ptr i = TCGV_NAT_TO_PTR(glue(tcg_const_local_, ptr)(0)); > + TCGv_ptr src; > + > + gen_set_label(loop); > + > + src = tcg_temp_new_ptr(); > + tcg_gen_add_ptr(src, cpu_env, i); > + tcg_gen_ld_i64(t0, src, vofs); > + > + /* Minimize the number of local temps that must be re-read from > + * the stack each iteration. Instead, re-compute values other > + * than the loop counter. > + */ > + tcg_gen_addi_ptr(src, i, imm); > +#if UINTPTR_MAX == UINT32_MAX > + tcg_gen_extu_i32_i64(addr, TCGV_PTR_TO_NAT(src)); > + tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, rn)); > +#else > + tcg_gen_add_i64(addr, TCGV_PTR_TO_NAT(src), cpu_reg_sp(s, rn)); > +#endif We should be able to avoid the ifdef with more support for tcg_*_ptr ops (similar to an earlier patch in the series). > + tcg_temp_free_ptr(src); > + > + tcg_gen_qemu_st_i64(t0, addr, midx, MO_LEQ); > + > + tcg_gen_addi_ptr(i, i, 8); > + > + glue(tcg_gen_brcondi_, ptr)(TCG_COND_LTU, TCGV_PTR_TO_NAT(i), > + len_align, loop); > + tcg_temp_free_ptr(i); > + } > + > + /* Predicate register stores can be any multiple of 2. */ > + if (len_remain) { > + tcg_gen_ld_i64(t0, cpu_env, vofs + len_align); > + tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + len_align); > + > + switch (len_remain) { > + case 2: > + case 4: > + case 8: > + tcg_gen_qemu_st_i64(t0, addr, midx, MO_LE | ctz32(len_remain)); > + break; > + > + case 6: > + tcg_gen_qemu_st_i64(t0, addr, midx, MO_LEUL); > + tcg_gen_addi_i64(addr, addr, 4); > + tcg_gen_shri_i64(addr, addr, 32); > + tcg_gen_qemu_st_i64(t0, addr, midx, MO_LEUW); > + break; > + > + default: > + g_assert_not_reached(); > + } > + } > + tcg_temp_free_i64(addr); > + tcg_temp_free_i64(t0); > +} > + > #undef ptr thanks -- PMM