Il 11/10/2013 14:03, Tom Musta ha scritto: > This patch adds the vector move instructions: > > - xvabsdp - Vector Absolute Value Double-Precision > - xvnabsdp - Vector Negative Absolute Value Double-Precision > - xvnegdp - Vector Negate Double-Precision > - xvcpsgndp - Vector Copy Sign Double-Precision > - xvabssp - Vector Absolute Value Single-Precision > - xvnabssp - Vector Negative Absolute Value Single-Precision > - xvnegsp - Vector Negate Single-Precision > - xvcpsgnsp - Vector Copy Sign Single-Precision > > Signed-off-by: Tom Musta <tommu...@gmail.com> > --- > target-ppc/translate.c | 68 > ++++++++++++++++++++++++++++++++++++++++++++++++ > 1 files changed, 68 insertions(+), 0 deletions(-) > > diff --git a/target-ppc/translate.c b/target-ppc/translate.c > index 36e04b0..03a352d 100644 > --- a/target-ppc/translate.c > +++ b/target-ppc/translate.c > @@ -7216,6 +7216,66 @@ VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP) > VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP) > VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP) > > +#define VSX_VECTOR_MOVE(name, op, sgn_mask) \ > +static void glue(gen_, name)(DisasContext * ctx) \ > + { \ > + TCGv_i64 xbh, xbl; \ > + if (unlikely(!ctx->vsx_enabled)) { \ > + gen_exception(ctx, POWERPC_EXCP_VSXU); \ > + return; \ > + } \ > + xbh = tcg_temp_new(); \ > + xbl = tcg_temp_new(); \ > + tcg_gen_mov_i64(xbh, cpu_vsrh(xB(ctx->opcode))); \ > + tcg_gen_mov_i64(xbl, cpu_vsrl(xB(ctx->opcode))); \ > + switch (op) { \ > + case OP_ABS: { \ > + tcg_gen_andi_i64(xbh, xbh, ~(sgn_mask)); \ > + tcg_gen_andi_i64(xbl, xbl, ~(sgn_mask)); \ > + break; \ > + } \ > + case OP_NABS: { \ > + tcg_gen_ori_i64(xbh, xbh, (sgn_mask)); \ > + tcg_gen_ori_i64(xbl, xbl, (sgn_mask)); \ > + break; \ > + } \ > + case OP_NEG: { \ > + tcg_gen_xori_i64(xbh, xbh, (sgn_mask)); \ > + tcg_gen_xori_i64(xbl, xbl, (sgn_mask)); \ > + break; \ > + } \ > + case OP_CPSGN: { \ > + TCGv_i64 xah = tcg_temp_new(); \ > + TCGv_i64 xal = tcg_temp_new(); \ > + tcg_gen_mov_i64(xah, 
cpu_vsrh(xA(ctx->opcode))); \ > + tcg_gen_mov_i64(xal, cpu_vsrl(xA(ctx->opcode))); \ > + tcg_gen_andi_i64(xah, xah, (sgn_mask)); \ > + tcg_gen_andi_i64(xal, xal, (sgn_mask)); \ > + tcg_gen_andi_i64(xbh, xbh, ~(sgn_mask)); \ > + tcg_gen_andi_i64(xbl, xbl, ~(sgn_mask)); \ > + tcg_gen_or_i64(xbh, xbh, xah); \ > + tcg_gen_or_i64(xbl, xbl, xal); \ > + tcg_temp_free(xah); \ > + tcg_temp_free(xal); \ > + break; \
Same as before. Also, you may want to use a temporary for the other cases as well, so that the constant is reused. Using andc for OP_ABS also makes sense, since on some RISC machines 0x80000... is cheaper than 0x7FFFF... On targets that lack andc, you'll just get the same code you're generating now. Paolo > + } \ > + } \ > + tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xbh); \ > + tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xbl); \ > + tcg_temp_free(xbh); \ > + tcg_temp_free(xbl); \ > + } > + > +VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP) > +VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP) > +VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP) > +VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP) > +VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP) > +VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP) > +VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP) > +VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP) > + > + > > /*** SPE > extension ***/ > /* Register moves */ > @@ -9711,6 +9771,14 @@ GEN_XX2FORM(xsnabsdp, 0x12, 0x16, PPC2_VSX), > GEN_XX2FORM(xsnegdp, 0x12, 0x17, PPC2_VSX), > GEN_XX3FORM(xscpsgndp, 0x00, 0x16, PPC2_VSX), > > +GEN_XX2FORM(xvabsdp, 0x12, 0x1D, PPC2_VSX), > +GEN_XX2FORM(xvnabsdp, 0x12, 0x1E, PPC2_VSX), > +GEN_XX2FORM(xvnegdp, 0x12, 0x1F, PPC2_VSX), > +GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX), > +GEN_XX2FORM(xvabssp, 0x12, 0x19, PPC2_VSX), > +GEN_XX2FORM(xvnabssp, 0x12, 0x1A, PPC2_VSX), > +GEN_XX2FORM(xvnegsp, 0x12, 0x1B, PPC2_VSX), > +GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX), > GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01), > > #undef GEN_SPE