[...]
>
> +static DisasJumpType op_vlbr(DisasContext *s, DisasOps *o)
> +{
> +    const uint8_t es = get_field(s, m3);
> +    TCGv_i64 t0, t1;
> +
> +    if (es < ES_16 || es > ES_128) {
> +        gen_program_exception(s, PGM_SPECIFICATION);
> +        return DISAS_NORETURN;
> +    }
> +
> +    t0 = tcg_temp_new_i64();
> +    t1 = tcg_temp_new_i64();
> +
> +
> +    if (es == ES_128) {
> +        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_LEUQ);
> +        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
> +        tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_LEUQ);
> +        goto write;
> +    }
> +
> +    /* Begin with byte reversed doublewords... */
> +    tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_LEUQ);
> +    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
> +    tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_LEUQ);
> +
> +    /*
> +     * For 16 and 32-bit elements, the doubleword bswap also reversed
> +     * the order of the elements. Perform a larger order swap to put
> +     * them back into place. For the 128-bit "element", finish the
> +     * bswap by swapping the doublewords.

Drop the 128-bit part of the comment.

> +     */
> +    switch (es) {
> +    case ES_16:
> +        tcg_gen_hswap_i64(t0, t0);
> +        tcg_gen_hswap_i64(t1, t1);
> +        break;
> +    case ES_32:
> +        tcg_gen_wswap_i64(t0, t0);
> +        tcg_gen_wswap_i64(t1, t1);
> +        break;
> +    case ES_64:
> +    case ES_128:

Drop the ES_128 case.
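
I.e., since ES_128 already takes the early goto and never reaches this
switch, with both of the above applied that part could end up looking
something like this (untested sketch):

    /*
     * For 16 and 32-bit elements, the doubleword bswap also reversed
     * the order of the elements. Perform a larger order swap to put
     * them back into place.
     */
    switch (es) {
    case ES_16:
        tcg_gen_hswap_i64(t0, t0);
        tcg_gen_hswap_i64(t1, t1);
        break;
    case ES_32:
        tcg_gen_wswap_i64(t0, t0);
        tcg_gen_wswap_i64(t1, t1);
        break;
    case ES_64:
        break;
    default:
        g_assert_not_reached();
    }
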
> +        break;
> +    default:
> +        g_assert_not_reached();
> +    }
> +
> +write:
> +    write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
> +    write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
> +
> +    tcg_temp_free(t0);
> +    tcg_temp_free(t1);
> +    return DISAS_NEXT;
> +}
> +
>  static DisasJumpType op_vle(DisasContext *s, DisasOps *o)
>  {
>      const uint8_t es = s->insn->data;
> @@ -998,6 +1055,64 @@ static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
>      return DISAS_NEXT;
>  }
>
> +static DisasJumpType op_vstbr(DisasContext *s, DisasOps *o)
> +{
> +    const uint8_t es = get_field(s, m3);
> +    TCGv_i64 t0, t1;
> +
> +    if (es < ES_16 || es > ES_128) {
> +        gen_program_exception(s, PGM_SPECIFICATION);
> +        return DISAS_NORETURN;
> +    }
> +
> +    /* Probe write access before actually modifying memory */
> +    gen_helper_probe_write_access(cpu_env, o->addr1, tcg_constant_i64(16));
> +
> +    t0 = tcg_temp_new_i64();
> +    t1 = tcg_temp_new_i64();
> +
> +
> +    if (es == ES_128) {
> +        read_vec_element_i64(t1, get_field(s, v1), 0, ES_64);
> +        read_vec_element_i64(t0, get_field(s, v1), 1, ES_64);
> +        goto write;
> +    }
> +
> +    read_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
> +    read_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
> +
> +    /*
> +     * For 16 and 32-bit elements, the doubleword bswap below will
> +     * reverse the order of the elements. Perform a larger order
> +     * swap to put them back into place. For the 128-bit "element",
> +     * finish the bswap by swapping the doublewords.

Ditto.

> +     */
> +    switch (es) {
> +    case MO_16:
> +        tcg_gen_hswap_i64(t0, t0);
> +        tcg_gen_hswap_i64(t1, t1);
> +        break;
> +    case MO_32:
> +        tcg_gen_wswap_i64(t0, t0);
> +        tcg_gen_wswap_i64(t1, t1);
> +        break;
> +    case MO_64:
> +    case MO_128:

Ditto.

> +        break;
> +    default:
> +        g_assert_not_reached();
> +    }
> +
> +write:
> +    tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_LEUQ);
> +    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
> +    tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_LEUQ);
> +
> +    tcg_temp_free(t0);
> +    tcg_temp_free(t1);
> +    return DISAS_NEXT;
> +}
> +
>  static DisasJumpType op_vste(DisasContext *s, DisasOps *o)
>  {
>      const uint8_t es = s->insn->data;

With that,

Reviewed-by: David Hildenbrand <da...@redhat.com>

--
Thanks,

David / dhildenb