Implement the MVE VQRSHL (vector) insn. Again, the code to perform the actual shifts is borrowed from neon_helper.c.
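As a reference for reviewers, here is a hypothetical standalone sketch (not part of this patch; uqrshl8 is an illustrative name) of the rounding-and-saturating shift semantics that DO_UQRSHL_OP below implements, specialised to unsigned 8-bit elements. The signed variants differ only in saturating towards INT8_MAX/INT8_MIN instead of UINT8_MAX:

/*
 * Hypothetical stand-alone model (not part of this patch) of the
 * unsigned rounding saturating shift performed by DO_UQRSHL_OP,
 * specialised to 8-bit elements. A negative shift count means a
 * rounding shift right; a positive one, a saturating shift left.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t uqrshl8(uint8_t src, int8_t shift, bool *satp)
{
    if (shift >= 8) {
        /* All bits shift out: any non-zero input saturates to 0xff */
        if (src) {
            *satp = true;
            return UINT8_MAX;
        }
        return 0;
    } else if (shift < -8) {
        return 0;
    } else if (shift == -8) {
        /* Only the rounding carry survives; it equals the top bit */
        return src >> 7;
    } else if (shift < 0) {
        /* Round half up: add 1 << (-shift - 1) before shifting right */
        return (src + (1 << (-1 - shift))) >> -shift;
    } else {
        uint8_t dest = src << shift;
        if ((dest >> shift) != src) {
            /* High bits were lost: saturate */
            *satp = true;
            return UINT8_MAX;
        }
        return dest;
    }
}

int main(void)
{
    bool sat = false;
    /* 5 >> 1 with rounding: 2.5 rounds up to 3 */
    printf("uqrshl8(5, -1)   = %d\n", uqrshl8(5, -1, &sat));
    /* 0x80 << 1 overflows 8 bits: saturates to 255, sets QC */
    printf("uqrshl8(0x80, 1) = %d, sat=%d\n", uqrshl8(0x80, 1, &sat), sat);
    return 0;
}

This mirrors the neon_helper.c logic the patch borrows; the 32-bit versions in the patch additionally widen to a 64-bit intermediate because the rounding addition can overflow 32 bits.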
Signed-off-by: Peter Maydell <peter.mayd...@linaro.org>
---
 target/arm/helper-mve.h    |   8 +++
 target/arm/mve.decode      |   3 +
 target/arm/mve_helper.c    | 127 +++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |   2 +
 4 files changed, 140 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 352b6a46a5e..a2f9916b24e 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -179,6 +179,14 @@ DEF_HELPER_FLAGS_4(mve_vqshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 
+DEF_HELPER_FLAGS_4(mve_vqrshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index 2c37e265765..e78eab6d659 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -133,6 +133,9 @@ VQSUB_U          111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
 VQSHL_S          111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 VQSHL_U          111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 
+VQRSHL_S         111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
+VQRSHL_U         111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
+
 # Vector miscellaneous
 
 VCLS             1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 7ac41cb1460..b7f9af4067b 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -637,6 +637,133 @@ DO_2OP_SAT(vqsubsw, 4, int32_t, H4, DO_SQSUB_W)
 DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
 DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
 
+#define DO_UQRSHL_OP(src1, src2, satp) \
+    ({ \
+        int8_t tmp; \
+        typeof(src1) dest; \
+        tmp = (int8_t)src2; \
+        if (tmp >= (ssize_t)sizeof(src1) * 8) { \
+            if (src1) { \
+                *satp = true; \
+                dest = ~0; \
+            } else { \
+                dest = 0; \
+            } \
+        } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
+            dest = 0; \
+        } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
+            dest = src1 >> (sizeof(src1) * 8 - 1); \
+        } else if (tmp < 0) { \
+            dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+        } else { \
+            dest = src1 << tmp; \
+            if ((dest >> tmp) != src1) { \
+                *satp = true; \
+                dest = ~0; \
+            } \
+        } \
+        dest; \
+    })
+
+/*
+ * The addition of the rounding constant may overflow, so we use an
+ * intermediate 64 bit accumulator for the 32-bit version.
+ */
+#define DO_UQRSHL32_OP(src1, src2, satp) \
+    ({ \
+        uint32_t dest; \
+        uint32_t val = src1; \
+        int8_t shift = (int8_t)src2; \
+        if (shift >= 32) { \
+            if (val) { \
+                *satp = true; \
+                dest = ~0; \
+            } else { \
+                dest = 0; \
+            } \
+        } else if (shift < -32) { \
+            dest = 0; \
+        } else if (shift == -32) { \
+            dest = val >> 31; \
+        } else if (shift < 0) { \
+            uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift))); \
+            dest = big_dest >> -shift; \
+        } else { \
+            dest = val << shift; \
+            if ((dest >> shift) != val) { \
+                *satp = true; \
+                dest = ~0; \
+            } \
+        } \
+        dest; \
+    })
+
+#define DO_SQRSHL_OP(src1, src2, satp) \
+    ({ \
+        int8_t tmp; \
+        typeof(src1) dest; \
+        tmp = (int8_t)src2; \
+        if (tmp >= (ssize_t)sizeof(src1) * 8) { \
+            if (src1) { \
+                *satp = true; \
+                dest = (typeof(dest))(1 << (sizeof(src1) * 8 - 1)); \
+                if (src1 > 0) { \
+                    dest--; \
+                } \
+            } else { \
+                dest = 0; \
+            } \
+        } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
+            dest = 0; \
+        } else if (tmp < 0) { \
+            dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+        } else { \
+            dest = src1 << tmp; \
+            if ((dest >> tmp) != src1) { \
+                *satp = true; \
+                dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
+                if (src1 > 0) { \
+                    dest--; \
+                } \
+            } \
+        } \
+        dest; \
+    })
+
+#define DO_SQRSHL32_OP(src1, src2, satp) \
+    ({ \
+        int32_t dest; \
+        int32_t val = (int32_t)src1; \
+        int8_t shift = (int8_t)src2; \
+        if (shift >= 32) { \
+            if (val) { \
+                *satp = true; \
+                dest = (val >> 31) ^ ~(1U << 31); \
+            } else { \
+                dest = 0; \
+            } \
+        } else if (shift <= -32) { \
+            dest = 0; \
+        } else if (shift < 0) { \
+            int64_t big_dest = ((int64_t)val + (1 << (-1 - shift))); \
+            dest = big_dest >> -shift; \
+        } else { \
+            dest = val << shift; \
+            if ((dest >> shift) != val) { \
+                *satp = true; \
+                dest = (val >> 31) ^ ~(1U << 31); \
+            } \
+        } \
+        dest; \
+    })
+
+DO_2OP_SAT(vqrshlub, 1, uint8_t, H1, DO_UQRSHL_OP)
+DO_2OP_SAT(vqrshluh, 2, uint16_t, H2, DO_UQRSHL_OP)
+DO_2OP_SAT(vqrshluw, 4, uint32_t, H4, DO_UQRSHL32_OP)
+DO_2OP_SAT(vqrshlsb, 1, int8_t, H1, DO_SQRSHL_OP)
+DO_2OP_SAT(vqrshlsh, 2, int16_t, H2, DO_SQRSHL_OP)
+DO_2OP_SAT(vqrshlsw, 4, int32_t, H4, DO_SQRSHL32_OP)
+
 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, H, FN) \
 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                             uint32_t rm) \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index 998f47fb94e..bea561726ea 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -423,6 +423,8 @@ DO_2OP(VQSUB_S, vqsubs)
 DO_2OP(VQSUB_U, vqsubu)
 DO_2OP(VQSHL_S, vqshls)
 DO_2OP(VQSHL_U, vqshlu)
+DO_2OP(VQRSHL_S, vqrshls)
+DO_2OP(VQRSHL_U, vqrshlu)
 
 static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
                           MVEGenTwoOpScalarFn fn)
-- 
2.20.1