From: Frank Chang <frank.ch...@sifive.com>

Introduce the concept of fractional LMUL for RVV 1.0.
In RVV 1.0, the LMUL bits are contiguous in the vtype register.
Signed-off-by: Frank Chang <frank.ch...@sifive.com>
---
 target/riscv/cpu.h           | 15 ++++++++-------
 target/riscv/translate.c     | 16 ++++++++++++++--
 target/riscv/vector_helper.c | 16 ++++++++++++++--
 3 files changed, 36 insertions(+), 11 deletions(-)

diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 08d2c10a024..d0f9a76ca01 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -94,10 +94,10 @@ typedef struct CPURISCVState CPURISCVState;
 
 #define RV_VLEN_MAX 256
 
-FIELD(VTYPE, VLMUL, 0, 2)
-FIELD(VTYPE, VSEW, 2, 3)
-FIELD(VTYPE, VEDIV, 5, 2)
-FIELD(VTYPE, RESERVED, 7, sizeof(target_ulong) * 8 - 9)
+FIELD(VTYPE, VLMUL, 0, 3)
+FIELD(VTYPE, VSEW, 3, 3)
+FIELD(VTYPE, VEDIV, 8, 2)
+FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
 FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 1, 1)
 
 struct CPURISCVState {
@@ -368,9 +368,10 @@ typedef RISCVCPU ArchCPU;
 #include "exec/cpu-all.h"
 
 FIELD(TB_FLAGS, VL_EQ_VLMAX, 2, 1)
-FIELD(TB_FLAGS, LMUL, 3, 2)
-FIELD(TB_FLAGS, SEW, 5, 3)
-FIELD(TB_FLAGS, VILL, 8, 1)
+FIELD(TB_FLAGS, LMUL, 3, 3)
+FIELD(TB_FLAGS, SEW, 6, 3)
+/* Skip MSTATUS_VS (0x600) fields */
+FIELD(TB_FLAGS, VILL, 11, 1)
 
 /*
  * A simplification for VLMAX
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 7b6088677d4..10ef55bbeb7 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -59,7 +59,19 @@ typedef struct DisasContext {
     bool ext_ifencei;
     /* vector extension */
     bool vill;
-    uint8_t lmul;
+    /*
+     * Encode LMUL to lmul as follows:
+     *     LMUL    vlmul    lmul
+     *      1       000       0
+     *      2       001       1
+     *      4       010       2
+     *      8       011       3
+     *      -       100       -
+     *     1/8      101      -3
+     *     1/4      110      -2
+     *     1/2      111      -1
+     */
+    int8_t lmul;
     uint8_t sew;
     uint16_t vlen;
     bool vl_eq_vlmax;
@@ -851,7 +863,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->vlen = cpu->cfg.vlen;
     ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
     ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
-    ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
+    ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3);
     ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
 }
 
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index f42346cb9ca..37c510b98f0 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -86,9 +86,21 @@ static inline uint32_t vext_vm(uint32_t desc)
     return FIELD_EX32(simd_data(desc), VDATA, VM);
 }
 
-static inline uint32_t vext_lmul(uint32_t desc)
+/*
+ * Encode LMUL to lmul as follows:
+ *     LMUL    vlmul    lmul
+ *      1       000       0
+ *      2       001       1
+ *      4       010       2
+ *      8       011       3
+ *      -       100       -
+ *     1/8      101      -3
+ *     1/4      110      -2
+ *     1/2      111      -1
+ */
+static inline int32_t vext_lmul(uint32_t desc)
 {
-    return FIELD_EX32(simd_data(desc), VDATA, LMUL);
+    return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3);
 }
 
 static uint32_t vext_wd(uint32_t desc)
-- 
2.17.1
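
For reference, a minimal standalone sketch (not part of the patch) of the
vlmul -> lmul mapping described in the new comments. It applies the same
3-bit sign-extension that vext_lmul() now performs via sextract32(); the
sext3() helper and the lmul_names table below are local illustrations only,
so the snippet builds without QEMU headers.

#include <stdio.h>
#include <stdint.h>

/*
 * Sign-extend the low 3 bits of 'vlmul', equivalent to
 * sextract32(vlmul, 0, 3) in QEMU.
 */
static int32_t sext3(uint32_t vlmul)
{
    return (int32_t)(vlmul << 29) >> 29;
}

int main(void)
{
    /* LMUL spelled out, indexed by the signed lmul value + 3. */
    static const char *const lmul_names[] = {
        "1/8", "1/4", "1/2", "1", "2", "4", "8"
    };
    uint32_t vlmul;

    for (vlmul = 0; vlmul < 8; vlmul++) {
        int32_t lmul = sext3(vlmul);

        if (vlmul == 4) {            /* vlmul = 100 is reserved */
            printf("vlmul=%u%u%u  (reserved)\n",
                   (vlmul >> 2) & 1, (vlmul >> 1) & 1, vlmul & 1);
            continue;
        }
        printf("vlmul=%u%u%u  lmul=%2d  LMUL=%s\n",
               (vlmul >> 2) & 1, (vlmul >> 1) & 1, vlmul & 1,
               lmul, lmul_names[lmul + 3]);
    }
    return 0;
}

Running it prints one line per vlmul encoding, with 100 reported as
reserved, which should reproduce the table added in the comments above.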