From: Juha Riihimäki <juha.riihim...@nokia.com>

Source code alignment throughout the disas_neon_data_insn() function is inconsistent; this patch aligns the formatting (indentation, brace placement and line wrapping) with the rest of the file.
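Most hunks are mechanical: single-statement conditionals gain braces, switch case bodies move onto their own lines, and over-long helper calls are wrapped. As a rough illustration of the intent (a sketch based on the patch's first hunk, not an additional change):

    /* before */
    if (!vfp_enabled(env))
        return 1;

    /* after */
    if (!vfp_enabled(env)) {
        return 1;
    }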
Signed-off-by: Juha Riihimäki <juha.riihim...@nokia.com> --- target-arm/translate.c | 697 +++++++++++++++++++++++++++--------------------- 1 files changed, 390 insertions(+), 307 deletions(-) diff --git a/target-arm/translate.c b/target-arm/translate.c index 3d409b6..eb1edfd 100644 --- a/target-arm/translate.c +++ b/target-arm/translate.c @@ -4136,8 +4136,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) TCGv tmp, tmp2, tmp3, tmp4, tmp5; TCGv_i64 tmp64; - if (!vfp_enabled(env)) - return 1; + if (!vfp_enabled(env)) { + return 1; + } q = (insn & (1 << 6)) != 0; u = (insn >> 24) & 1; VFP_DREG_D(rd, insn); @@ -4292,261 +4293,275 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) } for (pass = 0; pass < (q ? 4 : 2); pass++) { - if (pairwise) { - /* Pairwise. */ - if (!pass) { - tmp = neon_load_reg(rn, 0); - tmp2 = neon_load_reg(rn, 1); + if (pairwise) { + /* Pairwise. */ + if (!pass) { + tmp = neon_load_reg(rn, 0); + tmp2 = neon_load_reg(rn, 1); + } else { + tmp = neon_load_reg(rm, 0); + tmp2 = neon_load_reg(rm, 1); + } } else { - tmp = neon_load_reg(rm, 0); - tmp2 = neon_load_reg(rm, 1); + /* Elementwise. */ + tmp = neon_load_reg(rn, pass); + tmp2 = neon_load_reg(rm, pass); } - } else { - /* Elementwise. */ - tmp = neon_load_reg(rn, pass); - tmp2 = neon_load_reg(rm, pass); - } - switch (op) { - case 0: /* VHADD */ - GEN_NEON_INTEGER_OP(hadd); - break; - case 1: /* VQADD */ - GEN_NEON_INTEGER_OP_ENV(qadd); - break; - case 2: /* VRHADD */ - GEN_NEON_INTEGER_OP(rhadd); - break; - case 3: /* Logic ops. */ - switch ((u << 2) | size) { - case 0: /* VAND */ - tcg_gen_and_i32(tmp, tmp, tmp2); + switch (op) { + case 0: /* VHADD */ + GEN_NEON_INTEGER_OP(hadd); break; - case 1: /* BIC */ - tcg_gen_andc_i32(tmp, tmp, tmp2); + case 1: /* VQADD */ + GEN_NEON_INTEGER_OP_ENV(qadd); break; - case 2: /* VORR */ - tcg_gen_or_i32(tmp, tmp, tmp2); + case 2: /* VRHADD */ + GEN_NEON_INTEGER_OP(rhadd); break; - case 3: /* VORN */ - tcg_gen_orc_i32(tmp, tmp, tmp2); + case 3: /* Logic ops. 
*/ + switch ((u << 2) | size) { + case 0: /* VAND */ + tcg_gen_and_i32(tmp, tmp, tmp2); + break; + case 1: /* VBIC */ + tcg_gen_andc_i32(tmp, tmp, tmp2); + break; + case 2: /* VORR, VMOV */ + tcg_gen_or_i32(tmp, tmp, tmp2); + break; + case 3: /* VORN */ + tcg_gen_orc_i32(tmp, tmp, tmp2); + break; + case 4: /* VEOR */ + tcg_gen_xor_i32(tmp, tmp, tmp2); + break; + case 5: /* VBSL */ + tmp3 = neon_load_reg(rd, pass); + gen_neon_bsl(tmp, tmp, tmp2, tmp3); + dead_tmp(tmp3); + break; + case 6: /* VBIT */ + tmp3 = neon_load_reg(rd, pass); + gen_neon_bsl(tmp, tmp, tmp3, tmp2); + dead_tmp(tmp3); + break; + case 7: /* VBIF */ + tmp3 = neon_load_reg(rd, pass); + gen_neon_bsl(tmp, tmp3, tmp, tmp2); + dead_tmp(tmp3); + break; + } break; - case 4: /* VEOR */ - tcg_gen_xor_i32(tmp, tmp, tmp2); + case 4: /* VHSUB */ + GEN_NEON_INTEGER_OP(hsub); break; - case 5: /* VBSL */ - tmp3 = neon_load_reg(rd, pass); - gen_neon_bsl(tmp, tmp, tmp2, tmp3); - dead_tmp(tmp3); + case 5: /* VQSUB */ + GEN_NEON_INTEGER_OP_ENV(qsub); break; - case 6: /* VBIT */ - tmp3 = neon_load_reg(rd, pass); - gen_neon_bsl(tmp, tmp, tmp3, tmp2); - dead_tmp(tmp3); + case 6: /* VCGT */ + GEN_NEON_INTEGER_OP(cgt); break; - case 7: /* VBIF */ - tmp3 = neon_load_reg(rd, pass); - gen_neon_bsl(tmp, tmp3, tmp, tmp2); - dead_tmp(tmp3); + case 7: /* VCGE */ + GEN_NEON_INTEGER_OP(cge); break; - } - break; - case 4: /* VHSUB */ - GEN_NEON_INTEGER_OP(hsub); - break; - case 5: /* VQSUB */ - GEN_NEON_INTEGER_OP_ENV(qsub); - break; - case 6: /* VCGT */ - GEN_NEON_INTEGER_OP(cgt); - break; - case 7: /* VCGE */ - GEN_NEON_INTEGER_OP(cge); - break; - case 8: /* VSHL */ - GEN_NEON_INTEGER_OP(shl); - break; - case 9: /* VQSHL */ - GEN_NEON_INTEGER_OP_ENV(qshl); - break; - case 10: /* VRSHL */ - GEN_NEON_INTEGER_OP(rshl); - break; - case 11: /* VQRSHL */ - GEN_NEON_INTEGER_OP_ENV(qrshl); - break; - case 12: /* VMAX */ - GEN_NEON_INTEGER_OP(max); - break; - case 13: /* VMIN */ - GEN_NEON_INTEGER_OP(min); - break; - case 14: /* VABD */ - GEN_NEON_INTEGER_OP(abd); - break; - case 15: /* VABA */ - GEN_NEON_INTEGER_OP(abd); - dead_tmp(tmp2); - tmp2 = neon_load_reg(rd, pass); - gen_neon_add(size, tmp, tmp2); - break; - case 16: - if (!u) { /* VADD */ - if (gen_neon_add(size, tmp, tmp2)) - abort(); /* size == 3 is handled earlier */ - } else { /* VSUB */ - switch (size) { - case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break; - case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break; - default: abort(); /* size == 3 is handled earlier */ - } - } - break; - case 17: - if (!u) { /* VTST */ - switch (size) { - case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break; - case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break; - default: abort(); /* size == 3 is handled earlier */ + case 8: /* VSHL */ + GEN_NEON_INTEGER_OP(shl); + break; + case 9: /* VQSHL */ + GEN_NEON_INTEGER_OP_ENV(qshl); + break; + case 10: /* VRSHL */ + GEN_NEON_INTEGER_OP(rshl); + break; + case 11: /* VQRSHL */ + GEN_NEON_INTEGER_OP_ENV(qrshl); + break; + case 12: /* VMAX */ + GEN_NEON_INTEGER_OP(max); + break; + case 13: /* VMIN */ + GEN_NEON_INTEGER_OP(min); + break; + case 14: /* VABD */ + GEN_NEON_INTEGER_OP(abd); + break; + case 15: /* VABA */ + GEN_NEON_INTEGER_OP(abd); + dead_tmp(tmp2); + tmp2 = neon_load_reg(rd, pass); + gen_neon_add(size, tmp, tmp2); + break; + case 16: + if (!u) { /* VADD */ + if (gen_neon_add(size, tmp, tmp2)) { + abort(); /* size == 3 is handled earlier */ + } + } 
else { /* VSUB */ + switch (size) { + case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break; + case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break; + case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break; + default: abort(); /* size == 3 is handled earlier */ + } } - } else { /* VCEQ */ - switch (size) { - case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break; - case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break; - default: abort(); /* size == 3 is handled earlier */ + break; + case 17: + if (!u) { /* VTST */ + switch (size) { + case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break; + case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break; + case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break; + default: abort(); /* size == 3 is handled earlier */ + } + } else { /* VCEQ */ + switch (size) { + case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break; + case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break; + case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break; + default: abort(); /* size == 3 is handled earlier */ + } } - } - break; - case 18: /* Multiply. */ - switch (size) { - case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; - case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; - default: abort(); /* size == 3 is handled earlier */ - } - dead_tmp(tmp2); - tmp2 = neon_load_reg(rd, pass); - if (u) { /* VMLS */ - gen_neon_rsb(size, tmp, tmp2); - } else { /* VMLA */ - gen_neon_add(size, tmp, tmp2); - } - break; - case 19: /* VMUL */ - if (u) { /* polynomial */ - gen_helper_neon_mul_p8(tmp, tmp, tmp2); - } else { /* Integer */ + break; + case 18: /* Multiply. */ switch (size) { case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; default: abort(); /* size == 3 is handled earlier */ } - } - break; - case 20: /* VPMAX */ - GEN_NEON_INTEGER_OP(pmax); - break; - case 21: /* VPMIN */ - GEN_NEON_INTEGER_OP(pmin); - break; - case 22: /* Hultiply high. */ - if (!u) { /* VQDMULH */ - switch (size) { - case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break; - case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break; - default: abort(); /* size == 0,3 is handled earlier */ + dead_tmp(tmp2); + tmp2 = neon_load_reg(rd, pass); + if (u) { /* VMLS */ + gen_neon_rsb(size, tmp, tmp2); + } else { /* VMLA */ + gen_neon_add(size, tmp, tmp2); + } + break; + case 19: /* VMUL */ + if (u) { /* polynomial */ + gen_helper_neon_mul_p8(tmp, tmp, tmp2); + } else { /* Integer */ + switch (size) { + case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; + case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; + case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; + default: abort(); /* size == 3 is handled earlier */ + } } - } else { /* VQRDHMUL */ + break; + case 20: /* VPMAX */ + GEN_NEON_INTEGER_OP(pmax); + break; + case 21: /* VPMIN */ + GEN_NEON_INTEGER_OP(pmin); + break; + case 22: /* Multiply high. 
*/ + if (!u) { /* VQDMULH */ + switch (size) { + case 1: + gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); + break; + default: + abort(); /* size == 0,3 is handled earlier */ + } + } else { /* VQRDHMUL */ + switch (size) { + case 1: + gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); + break; + default: + abort(); /* size == 0,3 is handled earlier */ + } + } + break; + case 23: /* VPADD */ switch (size) { - case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break; - case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break; - default: abort(); /* size == 0,3 is handled earlier */ + case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break; + case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break; + case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break; + default: abort(); /* size == 3 is handled earlier */ + } + break; + case 26: /* Floating point arithmetic. */ + switch ((u << 2) | size) { + case 0: /* VADD */ + gen_helper_neon_add_f32(tmp, tmp, tmp2); + break; + case 2: /* VSUB */ + gen_helper_neon_sub_f32(tmp, tmp, tmp2); + break; + case 4: /* VPADD */ + gen_helper_neon_add_f32(tmp, tmp, tmp2); + break; + case 6: /* VABD */ + gen_helper_neon_abd_f32(tmp, tmp, tmp2); + break; + default: + abort(); /* other values are handled earlier */ } - } - break; - case 23: /* VPADD */ - switch (size) { - case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break; - case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break; - default: abort(); /* size == 3 is handled earlier */ - } - break; - case 26: /* Floating point arithnetic. */ - switch ((u << 2) | size) { - case 0: /* VADD */ - gen_helper_neon_add_f32(tmp, tmp, tmp2); break; - case 2: /* VSUB */ - gen_helper_neon_sub_f32(tmp, tmp, tmp2); + case 27: /* Float multiply. */ + gen_helper_neon_mul_f32(tmp, tmp, tmp2); + if (!u) { + dead_tmp(tmp2); + tmp2 = neon_load_reg(rd, pass); + if (size == 0) { + gen_helper_neon_add_f32(tmp, tmp, tmp2); + } else { + gen_helper_neon_sub_f32(tmp, tmp2, tmp); + } + } break; - case 4: /* VPADD */ - gen_helper_neon_add_f32(tmp, tmp, tmp2); + case 28: /* Float compare. */ + if (!u) { + gen_helper_neon_ceq_f32(tmp, tmp, tmp2); + } else { + if (size == 0) { + gen_helper_neon_cge_f32(tmp, tmp, tmp2); + } else { + gen_helper_neon_cgt_f32(tmp, tmp, tmp2); + } + } break; - case 6: /* VABD */ - gen_helper_neon_abd_f32(tmp, tmp, tmp2); + case 29: /* Float compare absolute. */ + if (size == 0) { + gen_helper_neon_acge_f32(tmp, tmp, tmp2); + } else { + gen_helper_neon_acgt_f32(tmp, tmp, tmp2); + } break; - default: - abort(); /* other values are handled earlier */ - } - break; - case 27: /* Float multiply. */ - gen_helper_neon_mul_f32(tmp, tmp, tmp2); - if (!u) { - dead_tmp(tmp2); - tmp2 = neon_load_reg(rd, pass); + case 30: /* Float min/max. */ if (size == 0) { - gen_helper_neon_add_f32(tmp, tmp, tmp2); + gen_helper_neon_max_f32(tmp, tmp, tmp2); } else { - gen_helper_neon_sub_f32(tmp, tmp2, tmp); + gen_helper_neon_min_f32(tmp, tmp, tmp2); } + break; + case 31: + if (size == 0) { + gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env); + } else { + gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env); + } + break; + default: + abort(); } - break; - case 28: /* Float compare. */ - if (!u) { - gen_helper_neon_ceq_f32(tmp, tmp, tmp2); + dead_tmp(tmp2); + + /* Save the result. 
For elementwise operations we can put it + straight into the destination register. For pairwise operations + we have to be careful to avoid clobbering the source operands.*/ + if (pairwise && rd == rm) { + neon_store_scratch(pass, tmp); } else { - if (size == 0) - gen_helper_neon_cge_f32(tmp, tmp, tmp2); - else - gen_helper_neon_cgt_f32(tmp, tmp, tmp2); + neon_store_reg(rd, pass, tmp); } - break; - case 29: /* Float compare absolute. */ - if (size == 0) - gen_helper_neon_acge_f32(tmp, tmp, tmp2); - else - gen_helper_neon_acgt_f32(tmp, tmp, tmp2); - break; - case 30: /* Float min/max. */ - if (size == 0) - gen_helper_neon_max_f32(tmp, tmp, tmp2); - else - gen_helper_neon_min_f32(tmp, tmp, tmp2); - break; - case 31: - if (size == 0) - gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env); - else - gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env); - break; - default: - abort(); - } - dead_tmp(tmp2); - - /* Save the result. For elementwise operations we can put it - straight into the destination register. For pairwise operations - we have to be careful to avoid clobbering the source operands. */ - if (pairwise && rd == rm) { - neon_store_scratch(pass, tmp); - } else { - neon_store_reg(rd, pass, tmp); - } - } /* for pass */ if (pairwise && rd == rm) { for (pass = 0; pass < (q ? 4 : 2); pass++) { @@ -4567,8 +4582,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) size = 3; } else { size = 2; - while (size && (insn & (1 << (size + 19))) == 0) + while (size && (insn & (1 << (size + 19))) == 0) { size--; + } } shift = (insn >> 16) & ((1 << (3 + size)) - 1); /* To avoid excessive dumplication of ops we implement shift @@ -4582,8 +4598,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) || (!u && (op == 4 || op == 6))) { return 1; } - if (op <= 4) + if (op <= 4) { shift = shift - (1 << (size + 3)); + } if (size == 3) { count = q + 1; } else { @@ -4614,17 +4631,23 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) switch (op) { case 0: /* VSHR */ case 1: /* VSRA */ - if (u) - gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1); - else - gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1); + if (u) { + gen_helper_neon_shl_u64(cpu_V0, cpu_V0, + cpu_V1); + } else { + gen_helper_neon_shl_s64(cpu_V0, cpu_V0, + cpu_V1); + } break; case 2: /* VRSHR */ case 3: /* VRSRA */ - if (u) - gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1); - else - gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1); + if (u) { + gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, + cpu_V1); + } else { + gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, + cpu_V1); + } break; case 4: /* VSRI */ gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1); @@ -4637,10 +4660,13 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) cpu_V1); break; case 7: /* VQSHL/VQSHLU */ - if (u) - gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1); - else - gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1); + if (u) { + gen_helper_neon_qshl_u64(cpu_V0, cpu_env, + cpu_V0, cpu_V1); + } else { + gen_helper_neon_qshl_s64(cpu_V0, cpu_env, + cpu_V0, cpu_V1); + } break; } if (op == 1 || op == 3) { @@ -4678,21 +4704,38 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) case 4: /* VSRI */ case 5: /* VSHL, VSLI */ switch (size) { - case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break; - case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break; - default: abort(); /* size == 3 is 
handled earlier */ + case 0: + gen_helper_neon_shl_u8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_shl_u16(tmp, tmp, tmp2); + break; + case 2: + gen_helper_neon_shl_u32(tmp, tmp, tmp2); + break; + default: + abort(); /* size == 3 is handled earlier */ } break; - case 6: /* VQSHL */ + case 6: /* VQSHLU */ GEN_NEON_INTEGER_OP_ENV(qshl); break; case 7: /* VQSHLU */ switch (size) { - case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break; - case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break; - case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break; - default: abort(); /* size == 3 is handled earlier */ + case 0: + gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, + tmp2); + break; + case 1: + gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, + tmp2); + break; + case 2: + gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, + tmp2); + break; + default: + abort(); /* size == 3 is handled earlier */ } break; } @@ -4707,32 +4750,35 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) /* Insert */ switch (size) { case 0: - if (op == 4) + if (op == 4) { mask = 0xff >> -shift; - else + } else { mask = (uint8_t)(0xff << shift); + } mask |= mask << 8; mask |= mask << 16; break; case 1: - if (op == 4) + if (op == 4) { mask = 0xffff >> -shift; - else + } else { mask = (uint16_t)(0xffff << shift); + } mask |= mask << 16; break; case 2: if (shift < -31 || shift > 31) { mask = 0; } else { - if (op == 4) + if (op == 4) { mask = 0xffffffffu >> -shift; - else + } else { mask = 0xffffffffu << shift; + } } break; default: - abort(); + abort(); /* size == 3 is handled earlier */ } tmp2 = neon_load_reg(rd, pass); tcg_gen_andi_i32(tmp, tmp, mask); @@ -4775,15 +4821,21 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) if (size == 3) { neon_load_reg64(cpu_V0, rm + pass); if (q) { - if (u) - gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64); - else - gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64); + if (u) { + gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, + tmp64); + } else { + gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, + tmp64); + } } else { - if (u) - gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64); - else - gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64); + if (u) { + gen_helper_neon_shl_u64(cpu_V0, cpu_V0, + tmp64); + } else { + gen_helper_neon_shl_s64(cpu_V0, cpu_V0, + tmp64); + } } } else { tmp = neon_load_reg(rm + pass, 0); @@ -4798,10 +4850,11 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) if (op == 8 && !u) { gen_neon_narrow(size - 1, tmp, cpu_V0); } else { - if (op == 8) + if (op == 8) { gen_neon_narrow_sats(size - 1, tmp, cpu_V0); - else + } else { gen_neon_narrow_satu(size - 1, tmp, cpu_V0); + } } neon_store_reg(rd, pass, tmp); } /* for pass */ @@ -4812,14 +4865,15 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) } } else if (op == 10) { /* VSHLL */ - if (q) + if (q) { return 1; + } tmp = neon_load_reg(rm, 0); tmp2 = neon_load_reg(rm, 1); for (pass = 0; pass < 2; pass++) { - if (pass == 1) + if (pass == 1) { tmp = tmp2; - + } gen_neon_widen(cpu_V0, tmp, size, u); if (shift != 0) { @@ -4846,19 +4900,23 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) return 1; } for (pass = 0; pass < (q ? 
4 : 2); pass++) { - tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass)); + tcg_gen_ld_f32(cpu_F0s, cpu_env, + neon_reg_offset(rm, pass)); if (op & 1) { - if (u) + if (u) { gen_vfp_ulto(0, shift); - else + } else { gen_vfp_slto(0, shift); + } } else { - if (u) + if (u) { gen_vfp_toul(0, shift); - else + } else { gen_vfp_tosl(0, shift); + } } - tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass)); + tcg_gen_st_f32(cpu_F0s, cpu_env, + neon_reg_offset(rd, pass)); } } else { return 1; @@ -4897,8 +4955,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) break; case 14: imm |= (imm << 8) | (imm << 16) | (imm << 24); - if (invert) + if (invert) { imm = ~imm; + } break; case 15: if (invert) { @@ -5024,7 +5083,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) case 5: gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2); break; - default: abort(); + default: abort(); /* size == 3 is handled earlier */ } dead_tmp(tmp2); dead_tmp(tmp); @@ -5082,7 +5141,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); tcg_gen_trunc_i64_i32(tmp, cpu_V0); break; - default: abort(); + default: abort(); /* size == 3 is handled earlier */ } } else { switch (size) { @@ -5097,7 +5156,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); tcg_gen_trunc_i64_i32(tmp, cpu_V0); break; - default: abort(); + default: abort(); /* size == 3 is handled earlier */ } } if (pass == 0) { @@ -5136,24 +5195,35 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) tmp2 = neon_load_reg(rn, pass); if (op == 12) { if (size == 1) { - gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); + gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, + tmp2); } else { /* TODO: what happens when size == 0? */ - gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); + gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, + tmp2); } } else if (op == 13) { if (size == 1) { - gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); + gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, + tmp2); } else { /* TODO: what happens when size == 0? */ - gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); + gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, + tmp2); } } else if (op & 1) { gen_helper_neon_mul_f32(tmp, tmp, tmp2); } else { switch (size) { - case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; - case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; - default: abort(); /* size == 3 is handled earlier */ + case 0: + gen_helper_neon_mul_u8(tmp, tmp, tmp2); + break; + case 1: + gen_helper_neon_mul_u16(tmp, tmp, tmp2); + break; + case 2: + tcg_gen_mul_i32(tmp, tmp, tmp2); + break; + default: + abort(); /* size == 3 is handled earlier */ } } dead_tmp(tmp2); @@ -5174,7 +5244,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) gen_helper_neon_sub_f32(tmp, tmp2, tmp); break; default: - abort(); + abort(); /* size == 3 is handled earlier */ } dead_tmp(tmp2); } @@ -5304,8 +5374,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) } switch (op) { case 0: /* VREV64 */ - if (size == 3) + if (size == 3) { return 1; + } for (pass = 0; pass < (q ? 
2 : 1); pass++) { tmp = neon_load_reg(rm, pass * 2); tmp2 = neon_load_reg(rm, pass * 2 + 1); @@ -5330,8 +5401,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) break; case 4: case 5: /* VPADDL */ case 12: case 13: /* VPADAL */ - if (size == 3) + if (size == 3) { return 1; + } for (pass = 0; pass < q + 1; pass++) { tmp = neon_load_reg(rm, pass * 2); gen_neon_widen(cpu_V0, tmp, size, op & 1); @@ -5372,8 +5444,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) Rd A3 A2 A1 A0 B2 B0 A2 A0 Rm B3 B2 B1 B0 B3 B1 A3 A1 */ - if (size == 3 || (!q && size == 2)) + if (size == 3 || (!q && size == 2)) { return 1; + } gen_neon_unzip(rd, q, 0, size); gen_neon_unzip(rm, q, 4, size); if (q) { @@ -5399,8 +5472,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) Rd A3 A2 A1 A0 B1 A1 B0 A0 Rm B3 B2 B1 B0 B3 A3 B2 A2 */ - if (size == 3 || (!q && size == 2)) + if (size == 3 || (!q && size == 2)) { return 1; + } count = (q ? 4 : 2); for (n = 0; n < count; n++) { tmp = neon_load_reg(rd, n); @@ -5421,8 +5495,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) } break; case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */ - if (size == 3 || (rm & 1)) + if (size == 3 || (rm & 1)) { return 1; + } TCGV_UNUSED(tmp2); for (pass = 0; pass < 2; pass++) { neon_load_reg64(cpu_V0, rm + pass); @@ -5443,21 +5518,24 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) } break; case 38: /* VSHLL */ - if (q || size == 3 || (rd & 1)) + if (q || size == 3 || (rd & 1)) { return 1; + } tmp = neon_load_reg(rm, 0); tmp2 = neon_load_reg(rm, 1); for (pass = 0; pass < 2; pass++) { - if (pass == 1) + if (pass == 1) { tmp = tmp2; + } gen_neon_widen(cpu_V0, tmp, size, 1); neon_store_reg64(cpu_V0, rd + pass); } break; case 44: /* VCVT.F16.F32 */ if (!arm_feature(env, ARM_FEATURE_VFP_FP16) - || q || size != 1 || (rm & 1)) - return 1; + || q || size != 1 || (rm & 1)) { + return 1; + } tmp = new_tmp(); tmp2 = new_tmp(); tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0)); @@ -5479,8 +5557,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) break; case 46: /* VCVT.F32.F16 */ if (!arm_feature(env, ARM_FEATURE_VFP_FP16) - || q || size != 1 || (rd & 1)) - return 1; + || q || size != 1 || (rd & 1)) { + return 1; + } tmp3 = new_tmp(); tmp = neon_load_reg(rm, 0); tmp2 = neon_load_reg(rm, 1); @@ -5525,7 +5604,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) } gen_rev16(tmp); break; - case 8: /* CLS */ + case 8: /* VCLS */ switch (size) { case 0: gen_helper_neon_cls_s8(tmp, tmp); break; case 1: gen_helper_neon_cls_s16(tmp, tmp); break; @@ -5533,7 +5612,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) default: dead_tmp(tmp); return 1; } break; - case 9: /* CLZ */ + case 9: /* VCLZ */ switch (size) { case 0: gen_helper_neon_clz_u8(tmp, tmp); break; case 1: gen_helper_neon_clz_u16(tmp, tmp); break; @@ -5541,7 +5620,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) default: dead_tmp(tmp); return 1; } break; - case 10: /* CNT */ + case 10: /* VCNT */ if (size != 0) { dead_tmp(tmp); return 1; @@ -5580,8 +5659,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) default: tcg_temp_free_i32(tmp2); dead_tmp(tmp); return 1; } tcg_temp_free(tmp2); - if (op == 19) + if (op == 19) { tcg_gen_not_i32(tmp, tmp); + } break; case 17: case 20: /* 
VCGE #0, VCLT #0 */ tmp2 = tcg_const_i32(0); @@ -5592,8 +5672,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) default: tcg_temp_free_i32(tmp2); dead_tmp(tmp); return 1; } tcg_temp_free(tmp2); - if (op == 20) + if (op == 20) { tcg_gen_not_i32(tmp, tmp); + } break; case 18: /* VCEQ #0 */ tmp2 = tcg_const_i32(0); @@ -5642,8 +5723,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) tmp2 = tcg_const_i32(0); gen_helper_neon_cge_f32(tmp, tmp, tmp2); tcg_temp_free(tmp2); - if (op == 28) + if (op == 28) { tcg_gen_not_i32(tmp, tmp); + } break; case 26: /* Float VCEQ #0 */ if (size != 2) { @@ -5761,10 +5843,11 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) if (insn & (1 << 16)) { gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8); } else if (insn & (1 << 17)) { - if ((insn >> 18) & 1) + if ((insn >> 18) & 1) { gen_neon_dup_high16(tmp); - else + } else { gen_neon_dup_low16(tmp); + } } for (pass = 0; pass < (q ? 4 : 2); pass++) { tmp2 = new_tmp(); -- 1.6.5