Implement the MVE VMLA insn, which multiplies a vector by a scalar and accumulates into another vector.
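For reference (illustrative only, not part of the patch), the per-element
operation is Qda[e] = Qda[e] + (Qn[e] * Rm). The standalone C sketch below
shows the unpredicated 32-bit-lane case; the function name and the fixed
lane size are my own choices for the example:

    #include <stdint.h>

    /*
     * Reference model of VMLA for 32-bit lanes of a 128-bit MVE vector.
     * This ignores the lane predication that the real helpers apply.
     */
    static void vmla_ref_w(int32_t *qda, const int32_t *qn, int32_t rm)
    {
        for (int i = 0; i < 4; i++) {
            /* Do the arithmetic in unsigned so wrap-around is well defined */
            qda[i] = (int32_t)((uint32_t)qn[i] * (uint32_t)rm +
                               (uint32_t)qda[i]);
        }
    }

In the patch this corresponds to DO_VMLA(D, N, M) = (N) * (M) + (D), which
the DO_2OP_ACC_SCALAR_S/_U macros expand for each element size.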
Signed-off-by: Peter Maydell <peter.mayd...@linaro.org>
---
 target/arm/helper-mve.h    | 8 ++++++++
 target/arm/mve.decode      | 3 +++
 target/arm/mve_helper.c    | 6 ++++++
 target/arm/translate-mve.c | 2 ++
 4 files changed, 19 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 088bdd3ca50..50b34c601e1 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -371,6 +371,14 @@ DEF_HELPER_FLAGS_4(mve_vqdmullb_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i3
 DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(mve_vmlasb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlash, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlasw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vmlaub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlauh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlauw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vmlassb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vmlassh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vmlassw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index 0c4708ea988..2e2df61c860 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -412,6 +412,9 @@ VHSUB_U_scalar   1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
 VQDMULH_scalar   1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
 VQRDMULH_scalar  1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
 
+VMLA_S           1110 1110 0 . .. ... 1 ... 0 1110 . 100 .... @2scalar
+VMLA_U           1111 1110 0 . .. ... 1 ... 0 1110 . 100 .... @2scalar
+
 VMLAS_S          1110 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar
 VMLAS_U          1111 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar
 
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 8b70362f012..91c0add8da7 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -1019,6 +1019,12 @@ DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
 DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
 DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)
 
+/* Vector by scalar plus vector */
+#define DO_VMLA(D, N, M) ((N) * (M) + (D))
+
+DO_2OP_ACC_SCALAR_S(vmlas, DO_VMLA)
+DO_2OP_ACC_SCALAR_U(vmlau, DO_VMLA)
+
 /* Vector by vector plus scalar */
 #define DO_VMLAS(D, N, M) ((N) * (D) + (M))
 
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index 67b9c07447a..650f3b95edf 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -620,6 +620,8 @@ DO_2OP_SCALAR(VQSUB_U_scalar, vqsubu_scalar)
 DO_2OP_SCALAR(VQDMULH_scalar, vqdmulh_scalar)
 DO_2OP_SCALAR(VQRDMULH_scalar, vqrdmulh_scalar)
 DO_2OP_SCALAR(VBRSR, vbrsr)
+DO_2OP_SCALAR(VMLA_S, vmlas)
+DO_2OP_SCALAR(VMLA_U, vmlau)
 DO_2OP_SCALAR(VMLAS_S, vmlass)
 DO_2OP_SCALAR(VMLAS_U, vmlasu)
 
-- 
2.20.1