From: Monk Chiang <monk.chi...@sifive.com>

According to Section 3.4.2 (Vector Register Grouping) of the RISC-V
Vector Specification, LMUL must satisfy the rule LMUL >= SEW/ELEN.
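
For example, with Zve32x (ELEN = 32) and SEW = 32 the rule requires
LMUL >= 32/32 = 1, so the fractional mode vint32mf2_t (LMUL = 1/2) must
not be enabled; only with ELEN = 64 does the bound drop to 32/64 = 1/2.
Gating these modes on RVV_REQUIRE_ELEN_64 / TARGET_VECTOR_ELEN_64
instead of "TARGET_MIN_VLEN > 32" encodes exactly this condition.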

Changes since V2:
- Add checks in vector-iterators.md.
- Add one more testcase to check that VLS types use the correct mode.

gcc/ChangeLog:
        * config/riscv/riscv-v.cc (get_vlmul): Restrict the inferred
        LMUL so that LMUL >= SEW / ELEN.
        * config/riscv/riscv-vector-builtins-types.def: Use
        RVV_REQUIRE_ELEN_64 to check the LMUL number.
        * config/riscv/riscv-vector-switch.def: Likewise.
        * config/riscv/vector-iterators.md: Check TARGET_VECTOR_ELEN_64
        rather than "TARGET_MIN_VLEN > 32" for all iterators.

gcc/testsuite/ChangeLog:
        * gcc.target/riscv/rvv/autovec/pr111391-2.c: Update test.
        * gcc.target/riscv/rvv/base/abi-14.c: Update test.
        * gcc.target/riscv/rvv/base/abi-16.c: Update test.
        * gcc.target/riscv/rvv/base/abi-18.c: Update test.
        * gcc.target/riscv/rvv/base/vsetvl_zve32-1.c: New test.
        * gcc.target/riscv/rvv/base/vsetvl_zve32-2.c: New test.

Co-authored-by: Kito Cheng <kito.ch...@sifive.com>
---
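A minimal standalone sketch of the clamp now applied in get_vlmul, for
reviewers who want to see the resulting factors; has_elen64 stands in
for TARGET_VECTOR_ELEN_64 and the function name is illustrative, not a
GCC interface:

    #include <stdio.h>

    /* Largest 1/N register grouping allowed for a given SEW under the
       rule LMUL >= SEW / ELEN, i.e. N <= ELEN / SEW.  */
    static int
    max_fractional_factor (int sew, int has_elen64)
    {
      int elen = has_elen64 ? 64 : 32;
      return elen / sew;
    }

    int
    main (void)
    {
      for (int sew = 8; sew <= 32; sew *= 2)
        printf ("SEW=%2d: mf%d with ELEN=64, mf%d with ELEN=32\n",
                sew, max_fractional_factor (sew, 1),
                max_fractional_factor (sew, 0));
      return 0;
    }

With ELEN = 32 this gives mf4 for SEW=8, mf2 for SEW=16 and m1 for
SEW=32, matching the 8/elen, 4/elen and 2/elen clamps in the patch.
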
 gcc/config/riscv/riscv-v.cc                   |   8 +-
 .../riscv/riscv-vector-builtins-types.def     | 322 +++++++++---------
 gcc/config/riscv/riscv-vector-switch.def      |  84 ++---
 gcc/config/riscv/vector-iterators.md          | 256 +++++++-------
 .../gcc.target/riscv/rvv/autovec/pr111391-2.c |   2 +-
 .../gcc.target/riscv/rvv/base/abi-14.c        |  84 ++---
 .../gcc.target/riscv/rvv/base/abi-16.c        |  98 +++---
 .../gcc.target/riscv/rvv/base/abi-18.c        | 112 +++---
 .../riscv/rvv/base/vsetvl_zve32-1.c           |  73 ++++
 .../riscv/rvv/base/vsetvl_zve32-2.c           |  25 ++
 10 files changed, 582 insertions(+), 482 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vsetvl_zve32-1.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vsetvl_zve32-2.c

diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index 287eb3e54cf..aae2d274336 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -1782,13 +1782,15 @@ get_vlmul (machine_mode mode)
       int inner_size = GET_MODE_BITSIZE (GET_MODE_INNER (mode));
       if (size < TARGET_MIN_VLEN)
        {
+         /* Follow rule LMUL >= SEW / ELEN.  */
+         int elen = TARGET_VECTOR_ELEN_64 ? 1 : 2;
          int factor = TARGET_MIN_VLEN / size;
          if (inner_size == 8)
-           factor = MIN (factor, 8);
+           factor = MIN (factor, 8 / elen);
          else if (inner_size == 16)
-           factor = MIN (factor, 4);
+           factor = MIN (factor, 4 / elen);
          else if (inner_size == 32)
-           factor = MIN (factor, 2);
+           factor = MIN (factor, 2 / elen);
          else if (inner_size == 64)
            factor = MIN (factor, 1);
          else
diff --git a/gcc/config/riscv/riscv-vector-builtins-types.def b/gcc/config/riscv/riscv-vector-builtins-types.def
index 6b98b93dfb6..857b63758a0 100644
--- a/gcc/config/riscv/riscv-vector-builtins-types.def
+++ b/gcc/config/riscv/riscv-vector-builtins-types.def
@@ -369,20 +369,20 @@ along with GCC; see the file COPYING3. If not see
 #define DEF_RVV_XFQF_OPS(TYPE, REQUIRE)
 #endif
 
-DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_I_OPS (vint8mf4_t, 0)
 DEF_RVV_I_OPS (vint8mf2_t, 0)
 DEF_RVV_I_OPS (vint8m1_t, 0)
 DEF_RVV_I_OPS (vint8m2_t, 0)
 DEF_RVV_I_OPS (vint8m4_t, 0)
 DEF_RVV_I_OPS (vint8m8_t, 0)
-DEF_RVV_I_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_I_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_I_OPS (vint16mf2_t, 0)
 DEF_RVV_I_OPS (vint16m1_t, 0)
 DEF_RVV_I_OPS (vint16m2_t, 0)
 DEF_RVV_I_OPS (vint16m4_t, 0)
 DEF_RVV_I_OPS (vint16m8_t, 0)
-DEF_RVV_I_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_I_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_I_OPS (vint32m1_t, 0)
 DEF_RVV_I_OPS (vint32m2_t, 0)
 DEF_RVV_I_OPS (vint32m4_t, 0)
@@ -392,20 +392,20 @@ DEF_RVV_I_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_I_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_I_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_U_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_U_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_U_OPS (vuint8mf4_t, 0)
 DEF_RVV_U_OPS (vuint8mf2_t, 0)
 DEF_RVV_U_OPS (vuint8m1_t, 0)
 DEF_RVV_U_OPS (vuint8m2_t, 0)
 DEF_RVV_U_OPS (vuint8m4_t, 0)
 DEF_RVV_U_OPS (vuint8m8_t, 0)
-DEF_RVV_U_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_U_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_U_OPS (vuint16mf2_t, 0)
 DEF_RVV_U_OPS (vuint16m1_t, 0)
 DEF_RVV_U_OPS (vuint16m2_t, 0)
 DEF_RVV_U_OPS (vuint16m4_t, 0)
 DEF_RVV_U_OPS (vuint16m8_t, 0)
-DEF_RVV_U_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_U_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_U_OPS (vuint32m1_t, 0)
 DEF_RVV_U_OPS (vuint32m2_t, 0)
 DEF_RVV_U_OPS (vuint32m4_t, 0)
@@ -415,21 +415,21 @@ DEF_RVV_U_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_U_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_U_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_F_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_F_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_F_OPS (vbfloat16mf2_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_F_OPS (vbfloat16m1_t,  RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_F_OPS (vbfloat16m2_t,  RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_F_OPS (vbfloat16m4_t,  RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_F_OPS (vbfloat16m8_t,  RVV_REQUIRE_ELEN_BF_16)
 
-DEF_RVV_F_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_F_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_F_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_F_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_F_OPS (vfloat16m2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_F_OPS (vfloat16m4_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_F_OPS (vfloat16m8_t, RVV_REQUIRE_ELEN_FP_16)
 
-DEF_RVV_F_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_F_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_F_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_F_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_F_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
@@ -439,7 +439,7 @@ DEF_RVV_F_OPS (vfloat64m2_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_F_OPS (vfloat64m4_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_F_OPS (vfloat64m8_t, RVV_REQUIRE_ELEN_FP_64)
 
-DEF_RVV_B_OPS (vbool64_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_B_OPS (vbool64_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_B_OPS (vbool32_t, 0)
 DEF_RVV_B_OPS (vbool16_t, 0)
 DEF_RVV_B_OPS (vbool8_t, 0)
@@ -447,13 +447,13 @@ DEF_RVV_B_OPS (vbool4_t, 0)
 DEF_RVV_B_OPS (vbool2_t, 0)
 DEF_RVV_B_OPS (vbool1_t, 0)
 
-DEF_RVV_WEXTI_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WEXTI_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WEXTI_OPS (vint16mf2_t, 0)
 DEF_RVV_WEXTI_OPS (vint16m1_t, 0)
 DEF_RVV_WEXTI_OPS (vint16m2_t, 0)
 DEF_RVV_WEXTI_OPS (vint16m4_t, 0)
 DEF_RVV_WEXTI_OPS (vint16m8_t, 0)
-DEF_RVV_WEXTI_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WEXTI_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WEXTI_OPS (vint32m1_t, 0)
 DEF_RVV_WEXTI_OPS (vint32m2_t, 0)
 DEF_RVV_WEXTI_OPS (vint32m4_t, 0)
@@ -463,7 +463,7 @@ DEF_RVV_WEXTI_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WEXTI_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WEXTI_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_QEXTI_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_QEXTI_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_QEXTI_OPS (vint32m1_t, 0)
 DEF_RVV_QEXTI_OPS (vint32m2_t, 0)
 DEF_RVV_QEXTI_OPS (vint32m4_t, 0)
@@ -478,13 +478,13 @@ DEF_RVV_OEXTI_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_OEXTI_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_OEXTI_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_WEXTU_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WEXTU_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WEXTU_OPS (vuint16mf2_t, 0)
 DEF_RVV_WEXTU_OPS (vuint16m1_t, 0)
 DEF_RVV_WEXTU_OPS (vuint16m2_t, 0)
 DEF_RVV_WEXTU_OPS (vuint16m4_t, 0)
 DEF_RVV_WEXTU_OPS (vuint16m8_t, 0)
-DEF_RVV_WEXTU_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WEXTU_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WEXTU_OPS (vuint32m1_t, 0)
 DEF_RVV_WEXTU_OPS (vuint32m2_t, 0)
 DEF_RVV_WEXTU_OPS (vuint32m4_t, 0)
@@ -494,7 +494,7 @@ DEF_RVV_WEXTU_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WEXTU_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WEXTU_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_QEXTU_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_QEXTU_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_QEXTU_OPS (vuint32m1_t, 0)
 DEF_RVV_QEXTU_OPS (vuint32m2_t, 0)
 DEF_RVV_QEXTU_OPS (vuint32m4_t, 0)
@@ -509,20 +509,20 @@ DEF_RVV_OEXTU_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_OEXTU_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_OEXTU_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_FULL_V_I_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_FULL_V_I_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_FULL_V_I_OPS (vint8mf4_t, 0)
 DEF_RVV_FULL_V_I_OPS (vint8mf2_t, 0)
 DEF_RVV_FULL_V_I_OPS (vint8m1_t, 0)
 DEF_RVV_FULL_V_I_OPS (vint8m2_t, 0)
 DEF_RVV_FULL_V_I_OPS (vint8m4_t, 0)
 DEF_RVV_FULL_V_I_OPS (vint8m8_t, 0)
-DEF_RVV_FULL_V_I_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_FULL_V_I_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_FULL_V_I_OPS (vint16mf2_t, 0)
 DEF_RVV_FULL_V_I_OPS (vint16m1_t, 0)
 DEF_RVV_FULL_V_I_OPS (vint16m2_t, 0)
 DEF_RVV_FULL_V_I_OPS (vint16m4_t, 0)
 DEF_RVV_FULL_V_I_OPS (vint16m8_t, 0)
-DEF_RVV_FULL_V_I_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_FULL_V_I_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_FULL_V_I_OPS (vint32m1_t, 0)
 DEF_RVV_FULL_V_I_OPS (vint32m2_t, 0)
 DEF_RVV_FULL_V_I_OPS (vint32m4_t, 0)
@@ -532,20 +532,20 @@ DEF_RVV_FULL_V_I_OPS (vint64m2_t, RVV_REQUIRE_FULL_V)
 DEF_RVV_FULL_V_I_OPS (vint64m4_t, RVV_REQUIRE_FULL_V)
 DEF_RVV_FULL_V_I_OPS (vint64m8_t, RVV_REQUIRE_FULL_V)
 
-DEF_RVV_FULL_V_U_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_FULL_V_U_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_FULL_V_U_OPS (vuint8mf4_t, 0)
 DEF_RVV_FULL_V_U_OPS (vuint8mf2_t, 0)
 DEF_RVV_FULL_V_U_OPS (vuint8m1_t, 0)
 DEF_RVV_FULL_V_U_OPS (vuint8m2_t, 0)
 DEF_RVV_FULL_V_U_OPS (vuint8m4_t, 0)
 DEF_RVV_FULL_V_U_OPS (vuint8m8_t, 0)
-DEF_RVV_FULL_V_U_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_FULL_V_U_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_FULL_V_U_OPS (vuint16mf2_t, 0)
 DEF_RVV_FULL_V_U_OPS (vuint16m1_t, 0)
 DEF_RVV_FULL_V_U_OPS (vuint16m2_t, 0)
 DEF_RVV_FULL_V_U_OPS (vuint16m4_t, 0)
 DEF_RVV_FULL_V_U_OPS (vuint16m8_t, 0)
-DEF_RVV_FULL_V_U_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_FULL_V_U_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_FULL_V_U_OPS (vuint32m1_t, 0)
 DEF_RVV_FULL_V_U_OPS (vuint32m2_t, 0)
 DEF_RVV_FULL_V_U_OPS (vuint32m4_t, 0)
@@ -555,7 +555,7 @@ DEF_RVV_FULL_V_U_OPS (vuint64m2_t, RVV_REQUIRE_FULL_V)
 DEF_RVV_FULL_V_U_OPS (vuint64m4_t, RVV_REQUIRE_FULL_V)
 DEF_RVV_FULL_V_U_OPS (vuint64m8_t, RVV_REQUIRE_FULL_V)
 
-DEF_RVV_WEXTF_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WEXTF_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_WEXTF_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_WEXTF_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_WEXTF_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_16)
@@ -566,14 +566,14 @@ DEF_RVV_WEXTF_OPS (vfloat64m2_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_WEXTF_OPS (vfloat64m4_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_WEXTF_OPS (vfloat64m8_t, RVV_REQUIRE_ELEN_FP_64)
 
-DEF_RVV_CONVERT_I_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_CONVERT_I_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_CONVERT_I_OPS (vint16mf2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_CONVERT_I_OPS (vint16m1_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_CONVERT_I_OPS (vint16m2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_CONVERT_I_OPS (vint16m4_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_CONVERT_I_OPS (vint16m8_t, RVV_REQUIRE_ELEN_FP_16)
 
-DEF_RVV_CONVERT_I_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_CONVERT_I_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_CONVERT_I_OPS (vint32m1_t, 0)
 DEF_RVV_CONVERT_I_OPS (vint32m2_t, 0)
 DEF_RVV_CONVERT_I_OPS (vint32m4_t, 0)
@@ -583,14 +583,14 @@ DEF_RVV_CONVERT_I_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_CONVERT_I_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_CONVERT_I_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_CONVERT_U_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_CONVERT_U_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_CONVERT_U_OPS (vuint16mf2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_CONVERT_U_OPS (vuint16m1_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_CONVERT_U_OPS (vuint16m2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_CONVERT_U_OPS (vuint16m4_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_CONVERT_U_OPS (vuint16m8_t, RVV_REQUIRE_ELEN_FP_16)
 
-DEF_RVV_CONVERT_U_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_CONVERT_U_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_CONVERT_U_OPS (vuint32m1_t, 0)
 DEF_RVV_CONVERT_U_OPS (vuint32m2_t, 0)
 DEF_RVV_CONVERT_U_OPS (vuint32m4_t, 0)
@@ -600,7 +600,7 @@ DEF_RVV_CONVERT_U_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_CONVERT_U_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_CONVERT_U_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_WCONVERT_I_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WCONVERT_I_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_WCONVERT_I_OPS (vint32m1_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_WCONVERT_I_OPS (vint32m2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_WCONVERT_I_OPS (vint32m4_t, RVV_REQUIRE_ELEN_FP_16)
@@ -611,7 +611,7 @@ DEF_RVV_WCONVERT_I_OPS (vint64m2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64
 DEF_RVV_WCONVERT_I_OPS (vint64m4_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_WCONVERT_I_OPS (vint64m8_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_WCONVERT_U_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WCONVERT_U_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_WCONVERT_U_OPS (vuint32m1_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_WCONVERT_U_OPS (vuint32m2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_WCONVERT_U_OPS (vuint32m4_t, RVV_REQUIRE_ELEN_FP_16)
@@ -622,7 +622,7 @@ DEF_RVV_WCONVERT_U_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_6
 DEF_RVV_WCONVERT_U_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_WCONVERT_U_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_WCONVERT_F_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WCONVERT_F_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_WCONVERT_F_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_WCONVERT_F_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_WCONVERT_F_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
@@ -633,76 +633,76 @@ DEF_RVV_WCONVERT_F_OPS (vfloat64m2_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_WCONVERT_F_OPS (vfloat64m4_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_WCONVERT_F_OPS (vfloat64m8_t, RVV_REQUIRE_ELEN_FP_64)
 
-DEF_RVV_F32_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_F32_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_F32_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_F32_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_F32_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_F32_OPS (vfloat32m8_t, RVV_REQUIRE_ELEN_FP_32)
 
-DEF_RVV_WI_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WI_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WI_OPS (vint8mf4_t, 0)
 DEF_RVV_WI_OPS (vint8mf2_t, 0)
 DEF_RVV_WI_OPS (vint8m1_t, 0)
 DEF_RVV_WI_OPS (vint8m2_t, 0)
 DEF_RVV_WI_OPS (vint8m4_t, 0)
 DEF_RVV_WI_OPS (vint8m8_t, 0)
-DEF_RVV_WI_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WI_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WI_OPS (vint16mf2_t, 0)
 DEF_RVV_WI_OPS (vint16m1_t, 0)
 DEF_RVV_WI_OPS (vint16m2_t, 0)
 DEF_RVV_WI_OPS (vint16m4_t, 0)
 DEF_RVV_WI_OPS (vint16m8_t, 0)
-DEF_RVV_WI_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WI_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WI_OPS (vint32m1_t, 0)
 DEF_RVV_WI_OPS (vint32m2_t, 0)
 DEF_RVV_WI_OPS (vint32m4_t, 0)
 DEF_RVV_WI_OPS (vint32m8_t, 0)
 
-DEF_RVV_WU_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WU_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WU_OPS (vuint8mf4_t, 0)
 DEF_RVV_WU_OPS (vuint8mf2_t, 0)
 DEF_RVV_WU_OPS (vuint8m1_t, 0)
 DEF_RVV_WU_OPS (vuint8m2_t, 0)
 DEF_RVV_WU_OPS (vuint8m4_t, 0)
 DEF_RVV_WU_OPS (vuint8m8_t, 0)
-DEF_RVV_WU_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WU_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WU_OPS (vuint16mf2_t, 0)
 DEF_RVV_WU_OPS (vuint16m1_t, 0)
 DEF_RVV_WU_OPS (vuint16m2_t, 0)
 DEF_RVV_WU_OPS (vuint16m4_t, 0)
 DEF_RVV_WU_OPS (vuint16m8_t, 0)
-DEF_RVV_WU_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WU_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_WU_OPS (vuint32m1_t, 0)
 DEF_RVV_WU_OPS (vuint32m2_t, 0)
 DEF_RVV_WU_OPS (vuint32m4_t, 0)
 DEF_RVV_WU_OPS (vuint32m8_t, 0)
 
-DEF_RVV_WF_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WF_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_WF_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_WF_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_WF_OPS (vfloat16m2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_WF_OPS (vfloat16m4_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_WF_OPS (vfloat16m8_t, RVV_REQUIRE_ELEN_FP_16)
 
-DEF_RVV_WF_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_WF_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_WF_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_WF_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_WF_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_WF_OPS (vfloat32m8_t, RVV_REQUIRE_ELEN_FP_32)
 
-DEF_RVV_EI16_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vint8mf4_t, 0)
 DEF_RVV_EI16_OPS (vint8mf2_t, 0)
 DEF_RVV_EI16_OPS (vint8m1_t, 0)
 DEF_RVV_EI16_OPS (vint8m2_t, 0)
 DEF_RVV_EI16_OPS (vint8m4_t, 0)
-DEF_RVV_EI16_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vint16mf2_t, 0)
 DEF_RVV_EI16_OPS (vint16m1_t, 0)
 DEF_RVV_EI16_OPS (vint16m2_t, 0)
 DEF_RVV_EI16_OPS (vint16m4_t, 0)
 DEF_RVV_EI16_OPS (vint16m8_t, 0)
-DEF_RVV_EI16_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vint32m1_t, 0)
 DEF_RVV_EI16_OPS (vint32m2_t, 0)
 DEF_RVV_EI16_OPS (vint32m4_t, 0)
@@ -711,19 +711,19 @@ DEF_RVV_EI16_OPS (vint64m1_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_EI16_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vuint8mf4_t, 0)
 DEF_RVV_EI16_OPS (vuint8mf2_t, 0)
 DEF_RVV_EI16_OPS (vuint8m1_t, 0)
 DEF_RVV_EI16_OPS (vuint8m2_t, 0)
 DEF_RVV_EI16_OPS (vuint8m4_t, 0)
-DEF_RVV_EI16_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vuint16mf2_t, 0)
 DEF_RVV_EI16_OPS (vuint16m1_t, 0)
 DEF_RVV_EI16_OPS (vuint16m2_t, 0)
 DEF_RVV_EI16_OPS (vuint16m4_t, 0)
 DEF_RVV_EI16_OPS (vuint16m8_t, 0)
-DEF_RVV_EI16_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vuint32m1_t, 0)
 DEF_RVV_EI16_OPS (vuint32m2_t, 0)
 DEF_RVV_EI16_OPS (vuint32m4_t, 0)
@@ -733,14 +733,14 @@ DEF_RVV_EI16_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_EI16_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_EI16_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_EI16_OPS (vfloat16m2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_EI16_OPS (vfloat16m4_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_EI16_OPS (vfloat16m8_t, RVV_REQUIRE_ELEN_FP_16)
 
-DEF_RVV_EI16_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EI16_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_EI16_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_EI16_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_EI16_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
@@ -751,13 +751,13 @@ DEF_RVV_EI16_OPS (vfloat64m2_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_EI16_OPS (vfloat64m4_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_EI16_OPS (vfloat64m8_t, RVV_REQUIRE_ELEN_FP_64)
 
-DEF_RVV_EEW8_INTERPRET_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EEW8_INTERPRET_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EEW8_INTERPRET_OPS (vint16mf2_t, 0)
 DEF_RVV_EEW8_INTERPRET_OPS (vint16m1_t, 0)
 DEF_RVV_EEW8_INTERPRET_OPS (vint16m2_t, 0)
 DEF_RVV_EEW8_INTERPRET_OPS (vint16m4_t, 0)
 DEF_RVV_EEW8_INTERPRET_OPS (vint16m8_t, 0)
-DEF_RVV_EEW8_INTERPRET_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EEW8_INTERPRET_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EEW8_INTERPRET_OPS (vint32m1_t, 0)
 DEF_RVV_EEW8_INTERPRET_OPS (vint32m2_t, 0)
 DEF_RVV_EEW8_INTERPRET_OPS (vint32m4_t, 0)
@@ -766,13 +766,13 @@ DEF_RVV_EEW8_INTERPRET_OPS (vint64m1_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EEW8_INTERPRET_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EEW8_INTERPRET_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EEW8_INTERPRET_OPS (vint64m8_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_EEW8_INTERPRET_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EEW8_INTERPRET_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EEW8_INTERPRET_OPS (vuint16mf2_t, 0)
 DEF_RVV_EEW8_INTERPRET_OPS (vuint16m1_t, 0)
 DEF_RVV_EEW8_INTERPRET_OPS (vuint16m2_t, 0)
 DEF_RVV_EEW8_INTERPRET_OPS (vuint16m4_t, 0)
 DEF_RVV_EEW8_INTERPRET_OPS (vuint16m8_t, 0)
-DEF_RVV_EEW8_INTERPRET_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EEW8_INTERPRET_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EEW8_INTERPRET_OPS (vuint32m1_t, 0)
 DEF_RVV_EEW8_INTERPRET_OPS (vuint32m2_t, 0)
 DEF_RVV_EEW8_INTERPRET_OPS (vuint32m4_t, 0)
@@ -788,7 +788,7 @@ DEF_RVV_EEW16_INTERPRET_OPS (vint8m1_t, 0)
 DEF_RVV_EEW16_INTERPRET_OPS (vint8m2_t, 0)
 DEF_RVV_EEW16_INTERPRET_OPS (vint8m4_t, 0)
 DEF_RVV_EEW16_INTERPRET_OPS (vint8m8_t, 0)
-DEF_RVV_EEW16_INTERPRET_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EEW16_INTERPRET_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EEW16_INTERPRET_OPS (vint32m1_t, 0)
 DEF_RVV_EEW16_INTERPRET_OPS (vint32m2_t, 0)
 DEF_RVV_EEW16_INTERPRET_OPS (vint32m4_t, 0)
@@ -803,7 +803,7 @@ DEF_RVV_EEW16_INTERPRET_OPS (vuint8m1_t, 0)
 DEF_RVV_EEW16_INTERPRET_OPS (vuint8m2_t, 0)
 DEF_RVV_EEW16_INTERPRET_OPS (vuint8m4_t, 0)
 DEF_RVV_EEW16_INTERPRET_OPS (vuint8m8_t, 0)
-DEF_RVV_EEW16_INTERPRET_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_EEW16_INTERPRET_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_EEW16_INTERPRET_OPS (vuint32m1_t, 0)
 DEF_RVV_EEW16_INTERPRET_OPS (vuint32m2_t, 0)
 DEF_RVV_EEW16_INTERPRET_OPS (vuint32m4_t, 0)
@@ -994,53 +994,53 @@ DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool16_t, 0)
 DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool32_t, 0)
 DEF_RVV_UNSIGNED_EEW64_LMUL1_INTERPRET_OPS(vbool64_t, RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf4_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint8mf2_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint8m1_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint8m2_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint8m4_t, 0)
-DEF_RVV_X2_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint16mf2_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint16m1_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint16m2_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint16m4_t, 0)
-DEF_RVV_X2_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint32m1_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint32m2_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint32m4_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint64m1_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vint64m4_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X2_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint8mf4_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint8mf2_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint8m1_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint8m2_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint8m4_t, 0)
-DEF_RVV_X2_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint16mf2_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint16m1_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint16m2_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint16m4_t, 0)
-DEF_RVV_X2_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint32m1_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint32m2_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint32m4_t, 0)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X2_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vbfloat16mf2_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_X2_VLMUL_EXT_OPS (vbfloat16m1_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_X2_VLMUL_EXT_OPS (vbfloat16m2_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_X2_VLMUL_EXT_OPS (vbfloat16m4_t, RVV_REQUIRE_ELEN_BF_16)
-DEF_RVV_X2_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_X2_VLMUL_EXT_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_X2_VLMUL_EXT_OPS (vfloat16m2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_X2_VLMUL_EXT_OPS (vfloat16m4_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_X2_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_X2_VLMUL_EXT_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_X2_VLMUL_EXT_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
@@ -1048,107 +1048,107 @@ DEF_RVV_X2_VLMUL_EXT_OPS (vfloat64m1_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vfloat64m2_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_X2_VLMUL_EXT_OPS (vfloat64m4_t, RVV_REQUIRE_ELEN_FP_64)
 
-DEF_RVV_X4_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X4_VLMUL_EXT_OPS (vint8mf4_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vint8mf2_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vint8m1_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vint8m2_t, 0)
-DEF_RVV_X4_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X4_VLMUL_EXT_OPS (vint16mf2_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vint16m1_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vint16m2_t, 0)
-DEF_RVV_X4_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X4_VLMUL_EXT_OPS (vint32m1_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vint32m2_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vint64m1_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X4_VLMUL_EXT_OPS (vint64m2_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X4_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X4_VLMUL_EXT_OPS (vuint8mf4_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vuint8mf2_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vuint8m1_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vuint8m2_t, 0)
-DEF_RVV_X4_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X4_VLMUL_EXT_OPS (vuint16mf2_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vuint16m1_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vuint16m2_t, 0)
-DEF_RVV_X4_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X4_VLMUL_EXT_OPS (vuint32m1_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vuint32m2_t, 0)
 DEF_RVV_X4_VLMUL_EXT_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X4_VLMUL_EXT_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X4_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_X4_VLMUL_EXT_OPS (vbfloat16mf2_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_X4_VLMUL_EXT_OPS (vbfloat16m1_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_X4_VLMUL_EXT_OPS (vbfloat16m2_t, RVV_REQUIRE_ELEN_BF_16)
-DEF_RVV_X4_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_X4_VLMUL_EXT_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_X4_VLMUL_EXT_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_X4_VLMUL_EXT_OPS (vfloat16m2_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_X4_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X4_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_X4_VLMUL_EXT_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_X4_VLMUL_EXT_OPS (vfloat32m2_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_X4_VLMUL_EXT_OPS (vfloat64m1_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_X4_VLMUL_EXT_OPS (vfloat64m2_t, RVV_REQUIRE_ELEN_FP_64)
 
-DEF_RVV_X8_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X8_VLMUL_EXT_OPS (vint8mf4_t, 0)
 DEF_RVV_X8_VLMUL_EXT_OPS (vint8mf2_t, 0)
 DEF_RVV_X8_VLMUL_EXT_OPS (vint8m1_t, 0)
-DEF_RVV_X8_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X8_VLMUL_EXT_OPS (vint16mf2_t, 0)
 DEF_RVV_X8_VLMUL_EXT_OPS (vint16m1_t, 0)
-DEF_RVV_X8_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X8_VLMUL_EXT_OPS (vint32m1_t, 0)
 DEF_RVV_X8_VLMUL_EXT_OPS (vint64m1_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X8_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X8_VLMUL_EXT_OPS (vuint8mf4_t, 0)
 DEF_RVV_X8_VLMUL_EXT_OPS (vuint8mf2_t, 0)
 DEF_RVV_X8_VLMUL_EXT_OPS (vuint8m1_t, 0)
-DEF_RVV_X8_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X8_VLMUL_EXT_OPS (vuint16mf2_t, 0)
 DEF_RVV_X8_VLMUL_EXT_OPS (vuint16m1_t, 0)
-DEF_RVV_X8_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X8_VLMUL_EXT_OPS (vuint32m1_t, 0)
 DEF_RVV_X8_VLMUL_EXT_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_X8_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_X8_VLMUL_EXT_OPS (vbfloat16mf2_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_X8_VLMUL_EXT_OPS (vbfloat16m1_t, RVV_REQUIRE_ELEN_BF_16)
-DEF_RVV_X8_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_X8_VLMUL_EXT_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_X8_VLMUL_EXT_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_X8_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X8_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_X8_VLMUL_EXT_OPS (vfloat32m1_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_X8_VLMUL_EXT_OPS (vfloat64m1_t, RVV_REQUIRE_ELEN_FP_64)
 
-DEF_RVV_X16_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X16_VLMUL_EXT_OPS (vint8mf4_t, 0)
 DEF_RVV_X16_VLMUL_EXT_OPS (vint8mf2_t, 0)
-DEF_RVV_X16_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X16_VLMUL_EXT_OPS (vint16mf2_t, 0)
-DEF_RVV_X16_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_X16_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vint32mf2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X16_VLMUL_EXT_OPS (vuint8mf4_t, 0)
 DEF_RVV_X16_VLMUL_EXT_OPS (vuint8mf2_t, 0)
-DEF_RVV_X16_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X16_VLMUL_EXT_OPS (vuint16mf2_t, 0)
-DEF_RVV_X16_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_X16_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_X16_VLMUL_EXT_OPS (vbfloat16mf2_t, RVV_REQUIRE_ELEN_BF_16)
-DEF_RVV_X16_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_X16_VLMUL_EXT_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_X16_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X16_VLMUL_EXT_OPS (vfloat32mf2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_X32_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X32_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X32_VLMUL_EXT_OPS (vint8mf4_t, 0)
-DEF_RVV_X32_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_X32_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X32_VLMUL_EXT_OPS (vint16mf4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X32_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_X32_VLMUL_EXT_OPS (vuint8mf4_t, 0)
-DEF_RVV_X32_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_X32_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_X32_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X32_VLMUL_EXT_OPS (vuint16mf4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X32_VLMUL_EXT_OPS (vbfloat16mf4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_X32_VLMUL_EXT_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 
-DEF_RVV_X64_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_X64_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X64_VLMUL_EXT_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_X64_VLMUL_EXT_OPS (vuint8mf8_t, RVV_REQUIRE_ELEN_64)
 
 DEF_RVV_LMUL1_OPS (vint8m1_t, 0)
 DEF_RVV_LMUL1_OPS (vint16m1_t, 0)
@@ -1189,20 +1189,20 @@ DEF_RVV_LMUL4_OPS (vbfloat16m4_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_LMUL4_OPS (vfloat32m4_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_LMUL4_OPS (vfloat64m4_t, RVV_REQUIRE_ELEN_FP_64)
 
-DEF_RVV_TUPLE_OPS (vint8mf8x2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x3_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x3_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x5_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x5_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x6_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x6_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x7_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x7_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint8mf8x8_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint8mf8x8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x3_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x3_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x5_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x5_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x6_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x6_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x7_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x7_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint8mf8x8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint8mf8x8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_TUPLE_OPS (vint8mf4x2_t, 0)
 DEF_RVV_TUPLE_OPS (vuint8mf4x2_t, 0)
 DEF_RVV_TUPLE_OPS (vint8mf4x3_t, 0)
@@ -1253,20 +1253,20 @@ DEF_RVV_TUPLE_OPS (vint8m2x4_t, 0)
 DEF_RVV_TUPLE_OPS (vuint8m2x4_t, 0)
 DEF_RVV_TUPLE_OPS (vint8m4x2_t, 0)
 DEF_RVV_TUPLE_OPS (vuint8m4x2_t, 0)
-DEF_RVV_TUPLE_OPS (vint16mf4x2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint16mf4x3_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x3_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint16mf4x4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint16mf4x5_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x5_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint16mf4x6_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x6_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint16mf4x7_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x7_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint16mf4x8_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint16mf4x8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x3_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x3_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x5_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x5_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x6_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x6_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x7_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x7_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint16mf4x8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint16mf4x8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_TUPLE_OPS (vint16mf2x2_t, 0)
 DEF_RVV_TUPLE_OPS (vuint16mf2x2_t, 0)
 DEF_RVV_TUPLE_OPS (vint16mf2x3_t, 0)
@@ -1303,20 +1303,20 @@ DEF_RVV_TUPLE_OPS (vint16m2x4_t, 0)
 DEF_RVV_TUPLE_OPS (vuint16m2x4_t, 0)
 DEF_RVV_TUPLE_OPS (vint16m4x2_t, 0)
 DEF_RVV_TUPLE_OPS (vuint16m4x2_t, 0)
-DEF_RVV_TUPLE_OPS (vint32mf2x2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x2_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint32mf2x3_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x3_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint32mf2x4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x4_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint32mf2x5_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x5_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint32mf2x6_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x6_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint32mf2x7_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x7_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vint32mf2x8_t, RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vuint32mf2x8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x2_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x3_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x3_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x4_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x5_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x5_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x6_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x6_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x7_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x7_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vint32mf2x8_t, RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vuint32mf2x8_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_TUPLE_OPS (vint32m1x2_t, 0)
 DEF_RVV_TUPLE_OPS (vuint32m1x2_t, 0)
 DEF_RVV_TUPLE_OPS (vint32m1x3_t, 0)
@@ -1361,13 +1361,13 @@ DEF_RVV_TUPLE_OPS (vint64m2x4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_TUPLE_OPS (vuint64m2x4_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_TUPLE_OPS (vint64m4x2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_TUPLE_OPS (vuint64m4x2_t, RVV_REQUIRE_ELEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x2_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x3_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x5_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x6_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x7_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vbfloat16mf4x8_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x2_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x3_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x4_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x5_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x6_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x7_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vbfloat16mf4x8_t, RVV_REQUIRE_ELEN_BF_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_TUPLE_OPS (vbfloat16mf2x2_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_TUPLE_OPS (vbfloat16mf2x3_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_TUPLE_OPS (vbfloat16mf2x4_t, RVV_REQUIRE_ELEN_BF_16)
@@ -1386,13 +1386,13 @@ DEF_RVV_TUPLE_OPS (vbfloat16m2x2_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_TUPLE_OPS (vbfloat16m2x3_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_TUPLE_OPS (vbfloat16m2x4_t, RVV_REQUIRE_ELEN_BF_16)
 DEF_RVV_TUPLE_OPS (vbfloat16m4x2_t, RVV_REQUIRE_ELEN_BF_16)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x3_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x5_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x6_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x7_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat16mf4x8_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x2_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x3_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x5_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x6_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x7_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat16mf4x8_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_TUPLE_OPS (vfloat16mf2x2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_TUPLE_OPS (vfloat16mf2x3_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_TUPLE_OPS (vfloat16mf2x4_t, RVV_REQUIRE_ELEN_FP_16)
@@ -1411,13 +1411,13 @@ DEF_RVV_TUPLE_OPS (vfloat16m2x2_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_TUPLE_OPS (vfloat16m2x3_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_TUPLE_OPS (vfloat16m2x4_t, RVV_REQUIRE_ELEN_FP_16)
 DEF_RVV_TUPLE_OPS (vfloat16m4x2_t, RVV_REQUIRE_ELEN_FP_16)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x3_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x4_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x5_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x6_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x7_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
-DEF_RVV_TUPLE_OPS (vfloat32mf2x8_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x2_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x3_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x4_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x5_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x6_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x7_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_TUPLE_OPS (vfloat32mf2x8_t, RVV_REQUIRE_ELEN_FP_32 | RVV_REQUIRE_ELEN_64)
 DEF_RVV_TUPLE_OPS (vfloat32m1x2_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_TUPLE_OPS (vfloat32m1x3_t, RVV_REQUIRE_ELEN_FP_32)
 DEF_RVV_TUPLE_OPS (vfloat32m1x4_t, RVV_REQUIRE_ELEN_FP_32)
@@ -1441,7 +1441,7 @@ DEF_RVV_TUPLE_OPS (vfloat64m2x3_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_TUPLE_OPS (vfloat64m2x4_t, RVV_REQUIRE_ELEN_FP_64)
 DEF_RVV_TUPLE_OPS (vfloat64m4x2_t, RVV_REQUIRE_ELEN_FP_64)
 
-DEF_RVV_CRYPTO_SEW32_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_CRYPTO_SEW32_OPS (vuint32mf2_t, RVV_REQUIRE_ELEN_64)
 DEF_RVV_CRYPTO_SEW32_OPS (vuint32m1_t, 0)
 DEF_RVV_CRYPTO_SEW32_OPS (vuint32m2_t, 0)
 DEF_RVV_CRYPTO_SEW32_OPS (vuint32m4_t, 0)
diff --git a/gcc/config/riscv/riscv-vector-switch.def b/gcc/config/riscv/riscv-vector-switch.def
index 23744d076f9..1b0d61940a6 100644
--- a/gcc/config/riscv/riscv-vector-switch.def
+++ b/gcc/config/riscv/riscv-vector-switch.def
@@ -64,13 +64,13 @@ Encode the ratio of SEW/LMUL into the mask types.
   |BI   |RVVM1BI|RVVMF2BI|RVVMF4BI|RVVMF8BI|RVVMF16BI|RVVMF32BI|RVVMF64BI|  */
 
 /* Return 'REQUIREMENT' for machine_mode 'MODE'.
-   For example: 'MODE' = RVVMF64BImode needs TARGET_MIN_VLEN > 32.  */
+   For example: 'MODE' = RVVMF64BImode needs TARGET_VECTOR_ELEN_64.  */
 #ifndef ENTRY
 #define ENTRY(MODE, REQUIREMENT, VLMUL, RATIO)
 #endif
 
 /* Disable modes if TARGET_MIN_VLEN == 32.  */
-ENTRY (RVVMF64BI, TARGET_MIN_VLEN > 32, TARGET_XTHEADVECTOR ? LMUL_1 :LMUL_F8, 64)
+ENTRY (RVVMF64BI, TARGET_VECTOR_ELEN_64, TARGET_XTHEADVECTOR ? LMUL_1 :LMUL_F8, 64)
 ENTRY (RVVMF32BI, true, TARGET_XTHEADVECTOR ? LMUL_1 :LMUL_F4, 32)
 ENTRY (RVVMF16BI, true, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F2 , 16)
 ENTRY (RVVMF8BI, true, LMUL_1, 8)
@@ -85,7 +85,7 @@ ENTRY (RVVM2QI, true, LMUL_2, 4)
 ENTRY (RVVM1QI, true, LMUL_1, 8)
 ENTRY (RVVMF2QI, !TARGET_XTHEADVECTOR, LMUL_F2, 16)
 ENTRY (RVVMF4QI, !TARGET_XTHEADVECTOR, LMUL_F4, 32)
-ENTRY (RVVMF8QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F8, 64)
+ENTRY (RVVMF8QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F8, 64)
 
 /* Disable modes if TARGET_MIN_VLEN == 32.  */
 ENTRY (RVVM8HI, true, LMUL_8, 2)
@@ -93,7 +93,7 @@ ENTRY (RVVM4HI, true, LMUL_4, 4)
 ENTRY (RVVM2HI, true, LMUL_2, 8)
 ENTRY (RVVM1HI, true, LMUL_1, 16)
 ENTRY (RVVMF2HI, !TARGET_XTHEADVECTOR, LMUL_F2, 32)
-ENTRY (RVVMF4HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
+ENTRY (RVVMF4HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
 
 /* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_BF_16.  */
 ENTRY (RVVM8BF, TARGET_VECTOR_ELEN_BF_16, LMUL_8, 2)
@@ -109,21 +109,21 @@ ENTRY (RVVM4HF, TARGET_VECTOR_ELEN_FP_16, LMUL_4, 4)
 ENTRY (RVVM2HF, TARGET_VECTOR_ELEN_FP_16, LMUL_2, 8)
 ENTRY (RVVM1HF, TARGET_VECTOR_ELEN_FP_16, LMUL_1, 16)
 ENTRY (RVVMF2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, LMUL_F2, 32)
-ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
+ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)
 
 /* Disable modes if TARGET_MIN_VLEN == 32.  */
 ENTRY (RVVM8SI, true, LMUL_8, 4)
 ENTRY (RVVM4SI, true, LMUL_4, 8)
 ENTRY (RVVM2SI, true, LMUL_2, 16)
 ENTRY (RVVM1SI, true, LMUL_1, 32)
-ENTRY (RVVMF2SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
+ENTRY (RVVMF2SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
 
 /* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_FP_32.  */
 ENTRY (RVVM8SF, TARGET_VECTOR_ELEN_FP_32, LMUL_8, 4)
 ENTRY (RVVM4SF, TARGET_VECTOR_ELEN_FP_32, LMUL_4, 8)
 ENTRY (RVVM2SF, TARGET_VECTOR_ELEN_FP_32, LMUL_2, 16)
 ENTRY (RVVM1SF, TARGET_VECTOR_ELEN_FP_32, LMUL_1, 32)
-ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
+ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)
 
 /* Disable modes if !TARGET_VECTOR_ELEN_64.  */
 ENTRY (RVVM8DI, TARGET_VECTOR_ELEN_64, LMUL_8, 8)
@@ -152,61 +152,61 @@ ENTRY (RVVM1DF, TARGET_VECTOR_ELEN_FP_64, LMUL_1, 64)
 TUPLE_ENTRY (RVVM1x8QI, true, RVVM1QI, 8, LMUL_1, 8)
 TUPLE_ENTRY (RVVMF2x8QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 8, LMUL_F2, 16)
 TUPLE_ENTRY (RVVMF4x8QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 8, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x8QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 8, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x8QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 8, LMUL_F8, 64)
 TUPLE_ENTRY (RVVM1x7QI, true, RVVM1QI, 7, LMUL_1, 8)
 TUPLE_ENTRY (RVVMF2x7QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 7, LMUL_F2, 16)
 TUPLE_ENTRY (RVVMF4x7QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 7, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x7QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 7, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x7QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 7, LMUL_F8, 64)
 TUPLE_ENTRY (RVVM1x6QI, true, RVVM1QI, 6, LMUL_1, 8)
 TUPLE_ENTRY (RVVMF2x6QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 6, LMUL_F2, 16)
 TUPLE_ENTRY (RVVMF4x6QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 6, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x6QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 6, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x6QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 6, LMUL_F8, 64)
 TUPLE_ENTRY (RVVM1x5QI, true, RVVM1QI, 5, LMUL_1, 8)
 TUPLE_ENTRY (RVVMF2x5QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 5, LMUL_F2, 16)
 TUPLE_ENTRY (RVVMF4x5QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 5, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x5QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 5, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x5QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 5, LMUL_F8, 64)
 TUPLE_ENTRY (RVVM2x4QI, true, RVVM2QI, 4, LMUL_2, 4)
 TUPLE_ENTRY (RVVM1x4QI, true, RVVM1QI, 4, LMUL_1, 8)
 TUPLE_ENTRY (RVVMF2x4QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 4, LMUL_F2, 16)
 TUPLE_ENTRY (RVVMF4x4QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 4, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x4QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 4, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x4QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 4, LMUL_F8, 64)
 TUPLE_ENTRY (RVVM2x3QI, true, RVVM2QI, 3, LMUL_2, 4)
 TUPLE_ENTRY (RVVM1x3QI, true, RVVM1QI, 3, LMUL_1, 8)
 TUPLE_ENTRY (RVVMF2x3QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 3, LMUL_F2, 16)
 TUPLE_ENTRY (RVVMF4x3QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 3, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x3QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 3, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x3QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 3, LMUL_F8, 64)
 TUPLE_ENTRY (RVVM4x2QI, true, RVVM4QI, 2, LMUL_4, 2)
 TUPLE_ENTRY (RVVM2x2QI, true, RVVM2QI, 2, LMUL_2, 4)
 TUPLE_ENTRY (RVVM1x2QI, true, RVVM1QI, 2, LMUL_1, 8)
 TUPLE_ENTRY (RVVMF2x2QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 2, LMUL_F2, 16)
 TUPLE_ENTRY (RVVMF4x2QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 2, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x2QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 2, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF8x2QI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF8QI, 2, LMUL_F8, 64)
 
 TUPLE_ENTRY (RVVM1x8HI, true, RVVM1HI, 8, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x8HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 8, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x8HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 8, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x8HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 8, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM1x7HI, true, RVVM1HI, 7, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x7HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 7, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x7HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 7, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x7HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 7, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM1x6HI, true, RVVM1HI, 6, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x6HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 6, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x6HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 6, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x6HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 6, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM1x5HI, true, RVVM1HI, 5, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x5HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 5, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x5HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 5, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x5HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 5, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM2x4HI, true, RVVM2HI, 4, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x4HI, true, RVVM1HI, 4, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x4HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 4, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x4HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 4, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x4HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 4, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM2x3HI, true, RVVM2HI, 3, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x3HI, true, RVVM1HI, 3, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x3HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 3, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x3HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 3, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x3HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 3, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM4x2HI, true, RVVM4HI, 2, LMUL_4, 4)
 TUPLE_ENTRY (RVVM2x2HI, true, RVVM2HI, 2, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x2HI, true, RVVM1HI, 2, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x2HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 2, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x2HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 2, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x2HI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HI, 2, LMUL_F4, 64)
 
 TUPLE_ENTRY (RVVM1x8BF, TARGET_VECTOR_ELEN_BF_16, RVVM1BF, 8, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x8BF, TARGET_VECTOR_ELEN_BF_16, RVVMF2BF, 8, LMUL_F2, 32)
@@ -236,67 +236,67 @@ TUPLE_ENTRY (RVVMF4x2BF, TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32, RVVMF
 
 TUPLE_ENTRY (RVVM1x8HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 8, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x8HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 8, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 8, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 8, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM1x7HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 7, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x7HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 7, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 7, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF4HF, 7, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM1x6HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 6, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x6HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, 
RVVMF2HF, 6, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && 
!TARGET_XTHEADVECTOR, RVVMF4HF, 6, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && 
!TARGET_XTHEADVECTOR, RVVMF4HF, 6, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM1x5HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 5, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x5HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, 
RVVMF2HF, 5, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && 
!TARGET_XTHEADVECTOR, RVVMF4HF, 5, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && 
!TARGET_XTHEADVECTOR, RVVMF4HF, 5, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM2x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 4, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 4, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x4HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, 
RVVMF2HF, 4, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && 
!TARGET_XTHEADVECTOR, RVVMF4HF, 4, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && 
!TARGET_XTHEADVECTOR, RVVMF4HF, 4, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM2x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 3, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 3, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x3HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, 
RVVMF2HF, 3, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && 
!TARGET_XTHEADVECTOR, RVVMF4HF, 3, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && 
!TARGET_XTHEADVECTOR, RVVMF4HF, 3, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM4x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM4HF, 2, LMUL_4, 4)
 TUPLE_ENTRY (RVVM2x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 2, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 2, LMUL_1, 16)
 TUPLE_ENTRY (RVVMF2x2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, 
RVVMF2HF, 2, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && 
!TARGET_XTHEADVECTOR, RVVMF4HF, 2, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && 
!TARGET_XTHEADVECTOR, RVVMF4HF, 2, LMUL_F4, 64)
 
 TUPLE_ENTRY (RVVM1x8SI, true, RVVM1SI, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8SI, (TARGET_MIN_VLEN > 32) && !TARGET_XTHEADVECTOR, RVVMF2SI, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x8SI, (TARGET_VECTOR_ELEN_64) && !TARGET_XTHEADVECTOR, RVVMF2SI, 8, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM1x7SI, true, RVVM1SI, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7SI, (TARGET_MIN_VLEN > 32) && !TARGET_XTHEADVECTOR, RVVMF2SI, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x7SI, (TARGET_VECTOR_ELEN_64) && !TARGET_XTHEADVECTOR, RVVMF2SI, 7, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM1x6SI, true, RVVM1SI, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x6SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 6, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM1x5SI, true, RVVM1SI, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x5SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 5, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM2x4SI, true, RVVM2SI, 4, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x4SI, true, RVVM1SI, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x4SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 4, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM2x3SI, true, RVVM2SI, 3, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x3SI, true, RVVM1SI, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x3SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 3, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM4x2SI, true, RVVM4SI, 2, LMUL_4, 4)
 TUPLE_ENTRY (RVVM2x2SI, true, RVVM2SI, 2, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x2SI, true, RVVM1SI, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x2SI, TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SI, 2, LMUL_F2, 32)
 
 TUPLE_ENTRY (RVVM1x8SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 8, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM1x7SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 7, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM1x6SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 6, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM1x5SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 5, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM2x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 4, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 4, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM2x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 3, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 3, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM4x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM4SF, 2, LMUL_4, 4)
 TUPLE_ENTRY (RVVM2x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 2, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && !TARGET_XTHEADVECTOR, RVVMF2SF, 2, LMUL_F2, 32)
 
 TUPLE_ENTRY (RVVM1x8DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 8, LMUL_1, 16)
 TUPLE_ENTRY (RVVM1x7DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 7, LMUL_1, 16)
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index c1bd7397441..f8da71b1d65 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -128,9 +128,9 @@ (define_c_enum "unspecv" [
 
 ;; Subset of VI with fractional LMUL types
 (define_mode_iterator VI_FRAC [
-  RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
-  RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
-  (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
+  RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
+  (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 ])
 
 ;; Subset of VI with non-fractional LMUL types
@@ -154,10 +154,10 @@ (define_mode_iterator VI [ VI_NOFRAC (VI_FRAC "!TARGET_XTHEADVECTOR") ])
 (define_mode_iterator VF [
   (RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
   (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
-  (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+  (RVVMF4HF "TARGET_ZVFH && TARGET_VECTOR_ELEN_64")
 
   (RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") 
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
-  (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && 
TARGET_MIN_VLEN > 32")
+  (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && 
TARGET_VECTOR_ELEN_64")
 
   (RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
   (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -169,16 +169,16 @@ (define_mode_iterator VF_ZVFBF16 [
   (RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
   (RVVM1BF "TARGET_VECTOR_ELEN_BF_16")
   (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
-  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
+  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64")
 ])
 
 (define_mode_iterator VF_ZVFHMIN [
   (RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") 
(RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
   (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
-  (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+  (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64")
 
   (RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") 
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
-  (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && 
TARGET_MIN_VLEN > 32")
+  (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && 
TARGET_VECTOR_ELEN_64")
 
   (RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
   (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -305,20 +305,20 @@ (define_mode_iterator VLSF_ZVFHMIN [
 ])
 
 (define_mode_iterator VEEWEXT2 [
-  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
 
   (RVVM8BF "TARGET_VECTOR_ELEN_BF_16") (RVVM4BF "TARGET_VECTOR_ELEN_BF_16") 
(RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
   (RVVM1BF "TARGET_VECTOR_ELEN_BF_16") (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
-  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
+  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64")
 
   (RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") 
(RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
   (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
-  (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+  (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64")
 
-  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 
   (RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") 
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
-  (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && 
TARGET_MIN_VLEN > 32")
+  (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && 
TARGET_VECTOR_ELEN_64")
 
   (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
   (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -328,10 +328,10 @@ (define_mode_iterator VEEWEXT2 [
 ])
 
 (define_mode_iterator VEEWEXT4 [
-  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 
   (RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") 
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
-  (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && 
TARGET_MIN_VLEN > 32")
+  (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && 
TARGET_VECTOR_ELEN_64")
 
   (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
   (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -349,68 +349,68 @@ (define_mode_iterator VEEWEXT8 [
 ])
 
 (define_mode_iterator VEEWTRUNC2 [
-  RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+  RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
 
-  RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+  RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
 
   (RVVM4BF "TARGET_VECTOR_ELEN_BF_16") (RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
   (RVVM1BF "TARGET_VECTOR_ELEN_BF_16") (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
-  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
+  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64")
 
   (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
   (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
-  (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+  (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64")
 
   (RVVM4SI "TARGET_64BIT")
   (RVVM2SI "TARGET_64BIT")
   (RVVM1SI "TARGET_64BIT")
-  (RVVMF2SI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+  (RVVMF2SI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
 
   (RVVM4SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_64BIT")
   (RVVM2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_64BIT")
   (RVVM1SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_64BIT")
-  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
 ])
 
 (define_mode_iterator VEEWTRUNC4 [
-  RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+  RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
 
   (RVVM2HI "TARGET_64BIT")
   (RVVM1HI "TARGET_64BIT")
   (RVVMF2HI "TARGET_64BIT")
-  (RVVMF4HI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+  (RVVMF4HI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
 
   (RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
   (RVVM1BF "TARGET_VECTOR_ELEN_BF_16")
   (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
-  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
 
   (RVVM2HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
   (RVVM1HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
   (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
-  (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+  (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
 ])
 
 (define_mode_iterator VEEWTRUNC8 [
   (RVVM1QI "TARGET_64BIT")
   (RVVMF2QI "TARGET_64BIT")
   (RVVMF4QI "TARGET_64BIT")
-  (RVVMF8QI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
+  (RVVMF8QI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
 ])
 
 (define_mode_iterator VEI16 [
-  RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+  RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
 
-  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
 
   (RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") 
(RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
   (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
-  (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+  (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64")
 
-  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 
   (RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") 
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
-  (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && 
TARGET_MIN_VLEN > 32")
+  (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && 
TARGET_VECTOR_ELEN_64")
 
   (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
   (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -499,11 +499,11 @@ (define_mode_iterator VEI16 [
 ])
 
 (define_mode_iterator VFULLI [
-  RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+  RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
 
-  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
 
-  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 
   (RVVM8DI "TARGET_FULL_V") (RVVM4DI "TARGET_FULL_V") (RVVM2DI 
"TARGET_FULL_V") (RVVM1DI "TARGET_FULL_V")
 
@@ -556,17 +556,17 @@ (define_mode_iterator VFULLI [
 ])
 
 (define_mode_iterator VI_QH [
-  RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+  RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
 
-  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
 ])
 
 (define_mode_iterator VI_QHS [
-  RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+  RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
 
-  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
 
-  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 
   (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)")
   (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)")
@@ -607,11 +607,11 @@ (define_mode_iterator VI_QHS [
 ])
 
 (define_mode_iterator VI_QHS_NO_M8 [
-  RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+  RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
 
-  RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+  RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
 
-  RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 
   (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)")
   (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)")
@@ -651,10 +651,10 @@ (define_mode_iterator VI_QHS_NO_M8 [
 (define_mode_iterator VF_HS [
   (RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
   (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
-  (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+  (RVVMF4HF "TARGET_ZVFH && TARGET_VECTOR_ELEN_64")
 
   (RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") 
(RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
-  (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && 
TARGET_MIN_VLEN > 32")
+  (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && 
TARGET_VECTOR_ELEN_64")
 
   (V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_ZVFH")
   (V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_ZVFH")
@@ -686,11 +686,11 @@ (define_mode_iterator VF_HS_NO_M8 [
   (RVVM2HF "TARGET_ZVFH")
   (RVVM1HF "TARGET_ZVFH")
   (RVVMF2HF "TARGET_ZVFH")
-  (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+  (RVVMF4HF "TARGET_ZVFH && TARGET_VECTOR_ELEN_64")
   (RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
   (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
   (RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
-  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
 
   (V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_ZVFH")
   (V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_ZVFH")
@@ -721,11 +721,11 @@ (define_mode_iterator VF_HS_M8 [
 ])
 
 (define_mode_iterator V_VLSI_QHS [
-  RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+  RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
 
-  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
 
-  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 
   (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)")
   (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)")
@@ -803,13 +803,13 @@ (define_mode_iterator VFULLI_D [
 ;; E.g. when index mode = RVVM8QImode and Pmode = SImode, if it is not zero_extend or
 ;; scalar != 1, such gather/scatter is not allowed since we don't have RVVM32SImode.
 (define_mode_iterator RATIO64 [
-  (RVVMF8QI "TARGET_MIN_VLEN > 32")
-  (RVVMF4HI "TARGET_MIN_VLEN > 32")
-  (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  (RVVMF8QI "TARGET_VECTOR_ELEN_64")
+  (RVVMF4HI "TARGET_VECTOR_ELEN_64")
+  (RVVMF2SI "TARGET_VECTOR_ELEN_64")
   (RVVM1DI "TARGET_VECTOR_ELEN_64")
-  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
-  (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
-  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64")
+  (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64")
+  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
   (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
 ])
 
@@ -867,9 +867,9 @@ (define_mode_iterator RATIO1 [
 ])
 
 (define_mode_iterator RATIO64I [
-  (RVVMF8QI "TARGET_MIN_VLEN > 32")
-  (RVVMF4HI "TARGET_MIN_VLEN > 32")
-  (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  (RVVMF8QI "TARGET_VECTOR_ELEN_64")
+  (RVVMF4HI "TARGET_VECTOR_ELEN_64")
+  (RVVMF2SI "TARGET_VECTOR_ELEN_64")
   (RVVM1DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
 ])
 
@@ -929,23 +929,23 @@ (define_mode_iterator V_WHOLE [
 ])
 
 (define_mode_iterator V_FRACT [
-  RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+  RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
 
-  RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+  RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
 
-  (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16") (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
+  (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16") (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64")
 
-  (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
+  (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_64")
 
-  (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 
-  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
 ])
 
 (define_mode_iterator VWEXTI [
-  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
 
-  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 
   (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
   (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -991,7 +991,7 @@ (define_mode_iterator VWEXTF_ZVFHMIN [
   (RVVM4SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32")
   (RVVM2SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32")
   (RVVM1SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32")
-  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
 
   (RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
   (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -1024,7 +1024,7 @@ (define_mode_iterator VWEXTF [
   (RVVM4SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32")
   (RVVM2SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32")
   (RVVM1SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32")
-  (RVVMF2SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+  (RVVMF2SF "TARGET_ZVFH && TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
 
   (RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
   (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -1103,7 +1103,7 @@ (define_mode_iterator VWWCONVERTI [
 ])
 
 (define_mode_iterator VQEXTI [
-  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 
   (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
   (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
@@ -1164,27 +1164,27 @@ (define_mode_iterator VOEXTI [
 ])
 
 (define_mode_iterator V1T [
-  (RVVMF8x2QI "TARGET_MIN_VLEN > 32")
-  (RVVMF8x3QI "TARGET_MIN_VLEN > 32")
-  (RVVMF8x4QI "TARGET_MIN_VLEN > 32")
-  (RVVMF8x5QI "TARGET_MIN_VLEN > 32")
-  (RVVMF8x6QI "TARGET_MIN_VLEN > 32")
-  (RVVMF8x7QI "TARGET_MIN_VLEN > 32")
-  (RVVMF8x8QI "TARGET_MIN_VLEN > 32")
-  (RVVMF4x2HI "TARGET_MIN_VLEN > 32")
-  (RVVMF4x3HI "TARGET_MIN_VLEN > 32")
-  (RVVMF4x4HI "TARGET_MIN_VLEN > 32")
-  (RVVMF4x5HI "TARGET_MIN_VLEN > 32")
-  (RVVMF4x6HI "TARGET_MIN_VLEN > 32")
-  (RVVMF4x7HI "TARGET_MIN_VLEN > 32")
-  (RVVMF4x8HI "TARGET_MIN_VLEN > 32")
-  (RVVMF2x2SI "TARGET_MIN_VLEN > 32")
-  (RVVMF2x3SI "TARGET_MIN_VLEN > 32")
-  (RVVMF2x4SI "TARGET_MIN_VLEN > 32")
-  (RVVMF2x5SI "TARGET_MIN_VLEN > 32")
-  (RVVMF2x6SI "TARGET_MIN_VLEN > 32")
-  (RVVMF2x7SI "TARGET_MIN_VLEN > 32")
-  (RVVMF2x8SI "TARGET_MIN_VLEN > 32")
+  (RVVMF8x2QI "TARGET_VECTOR_ELEN_64")
+  (RVVMF8x3QI "TARGET_VECTOR_ELEN_64")
+  (RVVMF8x4QI "TARGET_VECTOR_ELEN_64")
+  (RVVMF8x5QI "TARGET_VECTOR_ELEN_64")
+  (RVVMF8x6QI "TARGET_VECTOR_ELEN_64")
+  (RVVMF8x7QI "TARGET_VECTOR_ELEN_64")
+  (RVVMF8x8QI "TARGET_VECTOR_ELEN_64")
+  (RVVMF4x2HI "TARGET_VECTOR_ELEN_64")
+  (RVVMF4x3HI "TARGET_VECTOR_ELEN_64")
+  (RVVMF4x4HI "TARGET_VECTOR_ELEN_64")
+  (RVVMF4x5HI "TARGET_VECTOR_ELEN_64")
+  (RVVMF4x6HI "TARGET_VECTOR_ELEN_64")
+  (RVVMF4x7HI "TARGET_VECTOR_ELEN_64")
+  (RVVMF4x8HI "TARGET_VECTOR_ELEN_64")
+  (RVVMF2x2SI "TARGET_VECTOR_ELEN_64")
+  (RVVMF2x3SI "TARGET_VECTOR_ELEN_64")
+  (RVVMF2x4SI "TARGET_VECTOR_ELEN_64")
+  (RVVMF2x5SI "TARGET_VECTOR_ELEN_64")
+  (RVVMF2x6SI "TARGET_VECTOR_ELEN_64")
+  (RVVMF2x7SI "TARGET_VECTOR_ELEN_64")
+  (RVVMF2x8SI "TARGET_VECTOR_ELEN_64")
   (RVVM1x2DI "TARGET_VECTOR_ELEN_64")
   (RVVM1x3DI "TARGET_VECTOR_ELEN_64")
   (RVVM1x4DI "TARGET_VECTOR_ELEN_64")
@@ -1192,27 +1192,27 @@ (define_mode_iterator V1T [
   (RVVM1x6DI "TARGET_VECTOR_ELEN_64")
   (RVVM1x7DI "TARGET_VECTOR_ELEN_64")
   (RVVM1x8DI "TARGET_VECTOR_ELEN_64")
-  (RVVMF4x2BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
-  (RVVMF4x3BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
-  (RVVMF4x4BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
-  (RVVMF4x5BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
-  (RVVMF4x6BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
-  (RVVMF4x7BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
-  (RVVMF4x8BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
-  (RVVMF4x2HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
-  (RVVMF4x3HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
-  (RVVMF4x4HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
-  (RVVMF4x5HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
-  (RVVMF4x6HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
-  (RVVMF4x7HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
-  (RVVMF4x8HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
-  (RVVMF2x2SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
-  (RVVMF2x3SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
-  (RVVMF2x4SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
-  (RVVMF2x5SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
-  (RVVMF2x6SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
-  (RVVMF2x7SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
-  (RVVMF2x8SF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_32")
+  (RVVMF4x2BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+  (RVVMF4x3BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+  (RVVMF4x4BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+  (RVVMF4x5BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+  (RVVMF4x6BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+  (RVVMF4x7BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+  (RVVMF4x8BF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_BF_16")
+  (RVVMF4x2HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+  (RVVMF4x3HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+  (RVVMF4x4HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+  (RVVMF4x5HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+  (RVVMF4x6HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+  (RVVMF4x7HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+  (RVVMF4x8HF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_16")
+  (RVVMF2x2SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+  (RVVMF2x3SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+  (RVVMF2x4SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+  (RVVMF2x5SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+  (RVVMF2x6SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+  (RVVMF2x7SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
+  (RVVMF2x8SF "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32")
   (RVVM1x2DF "TARGET_VECTOR_ELEN_FP_64")
   (RVVM1x3DF "TARGET_VECTOR_ELEN_FP_64")
   (RVVM1x4DF "TARGET_VECTOR_ELEN_FP_64")
@@ -1530,7 +1530,7 @@ (define_mode_iterator VLSB [
   (V4096BI "riscv_vector::vls_mode_valid_p (V4096BImode) && TARGET_MIN_VLEN >= 4096")])
 
 (define_mode_iterator VB [
-  (RVVMF64BI "TARGET_MIN_VLEN > 32") RVVMF32BI RVVMF16BI RVVMF8BI RVVMF4BI RVVMF2BI RVVM1BI
+  (RVVMF64BI "TARGET_VECTOR_ELEN_64") RVVMF32BI RVVMF16BI RVVMF8BI RVVMF4BI RVVMF2BI RVVM1BI
 ])
 
 ;; Iterator for indexed loads and stores.  We must disallow 64-bit indices on
@@ -1539,11 +1539,11 @@ (define_mode_iterator VB [
 ;; VINDEXED [VI8 VI16 VI32 (VI64 "TARGET_64BIT")].
 
 (define_mode_iterator VINDEXED [
-  RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+  RVVM8QI RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
 
-  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+  RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_VECTOR_ELEN_64")
 
-  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 
   (RVVM8DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
   (RVVM4DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
@@ -1555,15 +1555,15 @@ (define_mode_iterator VINDEXED [
   (RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
   (RVVM1BF "TARGET_VECTOR_ELEN_BF_16")
   (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
-  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
+  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_64")
 
   (RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
   (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
-  (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+  (RVVMF4HF "TARGET_ZVFH && TARGET_VECTOR_ELEN_64")
 
   (RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
   (RVVM2SF "TARGET_VECTOR_ELEN_FP_32") (RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
-  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
 
   (RVVM8DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_64BIT")
   (RVVM4DF "TARGET_VECTOR_ELEN_FP_64 && TARGET_64BIT")
@@ -3392,11 +3392,11 @@ (define_mode_attr v_f2si_convert [
 
 (define_mode_iterator V_VLS_F_CONVERT_SI [
   (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH") (RVVM1HF "TARGET_ZVFH")
-  (RVVMF2HF "TARGET_ZVFH") (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+  (RVVMF2HF "TARGET_ZVFH") (RVVMF4HF "TARGET_ZVFH && TARGET_VECTOR_ELEN_64")
 
   (RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
   (RVVM2SF "TARGET_VECTOR_ELEN_FP_32") (RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
-  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
 
   (RVVM8DF "TARGET_VECTOR_ELEN_FP_64")
   (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
@@ -3510,11 +3510,11 @@ (define_mode_attr V_F2DI_CONVERT_BRIDGE [
 
 (define_mode_iterator V_VLS_F_CONVERT_DI [
   (RVVM2HF "TARGET_ZVFH") (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
-  (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
+  (RVVMF4HF "TARGET_ZVFH && TARGET_VECTOR_ELEN_64")
 
   (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
   (RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
-  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+  (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_VECTOR_ELEN_64")
 
   (RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
   (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
@@ -4400,23 +4400,23 @@ (define_mode_iterator VLS_AVL_REG [
   (V4096BI "riscv_vector::vls_mode_valid_p (V4096BImode) && TARGET_MIN_VLEN >= 4096")])
 
 (define_mode_iterator VSI [
-  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 ])
 
 (define_mode_iterator VLMULX2_SI [
-  RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 ])
 
 (define_mode_iterator VLMULX4_SI [
-  RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM2SI RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 ])
 
 (define_mode_iterator VLMULX8_SI [
-  RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  RVVM1SI (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 ])
 
 (define_mode_iterator VLMULX16_SI [
-  (RVVMF2SI "TARGET_MIN_VLEN > 32")
+  (RVVMF2SI "TARGET_VECTOR_ELEN_64")
 ])
 
 (define_mode_attr VSIX2 [
@@ -4854,7 +4854,7 @@ (define_mode_attr sf_vqmacc_dod [
 ])
 
 (define_mode_iterator SF_XF [
-  RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+  RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_VECTOR_ELEN_64")
 ])
 
 
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111391-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111391-2.c
index 1f170c962e1..32db3a68fd3 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111391-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr111391-2.c
@@ -3,7 +3,7 @@
 
 #include "pr111391-1.c"
 
-/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*2,\s*e32,\s*mf2,\s*t[au],\s*m[au]} 1 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*2,\s*e32,\s*m1,\s*t[au],\s*m[au]} 1 } } */
 /* { dg-final { scan-assembler-times {vmv\.x\.s} 2 } } */
 /* { dg-final { scan-assembler-times {vslidedown.vi\s+v[0-9]+,\s*v[0-9]+,\s*1} 1 } } */
 /* { dg-final { scan-assembler-times {slli\s+[a-x0-9]+,[a-x0-9]+,32} 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-14.c b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-14.c
index 163152ae923..222d8c233ab 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-14.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-14.c
@@ -1,20 +1,20 @@
 /* { dg-do compile } */
 /* { dg-options "-O3 -march=rv32gc_zve32x_zvl64b -mabi=ilp32d" } */
 
-void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;}
-void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;}
-void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;}
-void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;}
-void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;}
-void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;} 
-void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;}
-void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;}
-void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;}
-void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;}
-void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;}
-void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;}
-void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;}
-void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;}
+void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x2_t'} } */
+void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x2_t'} } */
+void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x3_t'} } */
+void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x3_t'} } */
+void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x4_t'} } */
+void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x4_t'} } */
+void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x5_t'} } */
+void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x5_t'} } */
+void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x6_t'} } */
+void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x6_t'} } */
+void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x7_t'} } */
+void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x7_t'} } */
+void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x8_t'} } */
+void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x8_t'} } */
 void f___rvv_int8mf4x2_t () {__rvv_int8mf4x2_t t;}
 void f___rvv_uint8mf4x2_t () {__rvv_uint8mf4x2_t t;}
 void f___rvv_int8mf4x3_t () {__rvv_int8mf4x3_t t;}
@@ -65,20 +65,20 @@ void f___rvv_int8m2x4_t () {__rvv_int8m2x4_t t;}
 void f___rvv_uint8m2x4_t () {__rvv_uint8m2x4_t t;}
 void f___rvv_int8m4x2_t () {__rvv_int8m4x2_t t;}
 void f___rvv_uint8m4x2_t () {__rvv_uint8m4x2_t t;}
-void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;}
-void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;}
-void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;}
-void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;}
-void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;}
-void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;}
-void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;}
-void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;}
-void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;}
-void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;}
-void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;}
-void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;}
-void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;}
-void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;}
+void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x2_t'} } */
+void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x2_t'} } */
+void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x3_t'} } */
+void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x3_t'} } */
+void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x4_t'} } */
+void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x4_t'} } */
+void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x5_t'} } */
+void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x5_t'} } */
+void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x6_t'} } */
+void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x6_t'} } */
+void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x7_t'} } */
+void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x7_t'} } */
+void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x8_t'} } */
+void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x8_t'} } */
 void f___rvv_int16mf2x2_t () {__rvv_int16mf2x2_t t;}
 void f___rvv_uint16mf2x2_t () {__rvv_uint16mf2x2_t t;}
 void f___rvv_int16mf2x3_t () {__rvv_int16mf2x3_t t;}
@@ -115,20 +115,20 @@ void f___rvv_int16m2x4_t () {__rvv_int16m2x4_t t;}
 void f___rvv_uint16m2x4_t () {__rvv_uint16m2x4_t t;}
 void f___rvv_int16m4x2_t () {__rvv_int16m4x2_t t;}
 void f___rvv_uint16m4x2_t () {__rvv_uint16m4x2_t t;}
-void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;}
-void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;}
-void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;}
-void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;}
-void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;}
-void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;}
-void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;}
-void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;}
-void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;}
-void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;}
-void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;}
-void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;}
-void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;}
-void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;}
+void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x2_t'} } */
+void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x2_t'} } */
+void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x3_t'} } */
+void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x3_t'} } */
+void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x4_t'} } */
+void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x4_t'} } */
+void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x5_t'} } */
+void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x5_t'} } */
+void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x6_t'} } */
+void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x6_t'} } */
+void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x7_t'} } */
+void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x7_t'} } */
+void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x8_t'} } */
+void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x8_t'} } */
 void f___rvv_int32m1x2_t () {__rvv_int32m1x2_t t;}
 void f___rvv_uint32m1x2_t () {__rvv_uint32m1x2_t t;}
 void f___rvv_int32m1x3_t () {__rvv_int32m1x3_t t;}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-16.c b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-16.c
index 9e962a70acf..2762b7a0e30 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-16.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-16.c
@@ -1,20 +1,20 @@
 /* { dg-do compile } */
 /* { dg-options "-O3 -march=rv32gc_zve32f_zvl64b -mabi=ilp32d" } */
 
-void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;}
-void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;}
-void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;}
-void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;}
-void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;}
-void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;} 
-void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;}
-void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;}
-void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;}
-void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;}
-void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;}
-void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;}
-void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;}
-void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;}
+void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x2_t'} } */
+void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x2_t'} } */
+void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x3_t'} } */
+void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x3_t'} } */
+void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x4_t'} } */
+void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x4_t'} } */
+void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x5_t'} } */
+void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x5_t'} } */
+void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x6_t'} } */
+void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x6_t'} } */
+void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x7_t'} } */
+void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x7_t'} } */
+void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x8_t'} } */
+void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x8_t'} } */
 void f___rvv_int8mf4x2_t () {__rvv_int8mf4x2_t t;}
 void f___rvv_uint8mf4x2_t () {__rvv_uint8mf4x2_t t;}
 void f___rvv_int8mf4x3_t () {__rvv_int8mf4x3_t t;}
@@ -65,20 +65,20 @@ void f___rvv_int8m2x4_t () {__rvv_int8m2x4_t t;}
 void f___rvv_uint8m2x4_t () {__rvv_uint8m2x4_t t;}
 void f___rvv_int8m4x2_t () {__rvv_int8m4x2_t t;}
 void f___rvv_uint8m4x2_t () {__rvv_uint8m4x2_t t;}
-void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;}
-void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;}
-void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;}
-void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;}
-void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;}
-void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;}
-void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;}
-void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;}
-void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;}
-void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;}
-void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;}
-void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;}
-void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;}
-void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;}
+void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x2_t'} } */
+void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x2_t'} } */
+void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x3_t'} } */
+void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x3_t'} } */
+void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x4_t'} } */
+void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x4_t'} } */
+void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x5_t'} } */
+void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x5_t'} } */
+void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x6_t'} } */
+void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x6_t'} } */
+void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x7_t'} } */
+void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x7_t'} } */
+void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x8_t'} } */
+void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x8_t'} } */
 void f___rvv_int16mf2x2_t () {__rvv_int16mf2x2_t t;}
 void f___rvv_uint16mf2x2_t () {__rvv_uint16mf2x2_t t;}
 void f___rvv_int16mf2x3_t () {__rvv_int16mf2x3_t t;}
@@ -115,20 +115,20 @@ void f___rvv_int16m2x4_t () {__rvv_int16m2x4_t t;}
 void f___rvv_uint16m2x4_t () {__rvv_uint16m2x4_t t;}
 void f___rvv_int16m4x2_t () {__rvv_int16m4x2_t t;}
 void f___rvv_uint16m4x2_t () {__rvv_uint16m4x2_t t;}
-void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;}
-void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;}
-void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;}
-void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;}
-void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;}
-void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;}
-void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;}
-void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;}
-void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;}
-void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;}
-void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;}
-void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;}
-void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;}
-void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;}
+void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x2_t'} } */
+void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x2_t'} } */
+void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x3_t'} } */
+void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x3_t'} } */
+void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x4_t'} } */
+void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x4_t'} } */
+void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x5_t'} } */
+void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x5_t'} } */
+void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x6_t'} } */
+void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x6_t'} } */
+void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x7_t'} } */
+void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x7_t'} } */
+void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x8_t'} } */
+void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x8_t'} } */
 void f___rvv_int32m1x2_t () {__rvv_int32m1x2_t t;}
 void f___rvv_uint32m1x2_t () {__rvv_uint32m1x2_t t;}
 void f___rvv_int32m1x3_t () {__rvv_int32m1x3_t t;}
@@ -179,13 +179,13 @@ void f___rvv_float16m1_t () {__rvv_float16m1_t t;} /* { dg-error {unknown type n
 void f___rvv_float16m2_t () {__rvv_float16m2_t t;} /* { dg-error {unknown type name '__rvv_float16m2_t'} } */
 void f___rvv_float16m4_t () {__rvv_float16m4_t t;} /* { dg-error {unknown type name '__rvv_float16m4_t'} } */
 void f___rvv_float16m8_t () {__rvv_float16m8_t t;} /* { dg-error {unknown type name '__rvv_float16m8_t'} } */
-void f___rvv_float32mf2x2_t () {__rvv_float32mf2x2_t t;}
-void f___rvv_float32mf2x3_t () {__rvv_float32mf2x3_t t;}
-void f___rvv_float32mf2x4_t () {__rvv_float32mf2x4_t t;}
-void f___rvv_float32mf2x5_t () {__rvv_float32mf2x5_t t;}
-void f___rvv_float32mf2x6_t () {__rvv_float32mf2x6_t t;}
-void f___rvv_float32mf2x7_t () {__rvv_float32mf2x7_t t;}
-void f___rvv_float32mf2x8_t () {__rvv_float32mf2x8_t t;}
+void f___rvv_float32mf2x2_t () {__rvv_float32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x2_t'} } */
+void f___rvv_float32mf2x3_t () {__rvv_float32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x3_t'} } */
+void f___rvv_float32mf2x4_t () {__rvv_float32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x4_t'} } */
+void f___rvv_float32mf2x5_t () {__rvv_float32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x5_t'} } */
+void f___rvv_float32mf2x6_t () {__rvv_float32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x6_t'} } */
+void f___rvv_float32mf2x7_t () {__rvv_float32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x7_t'} } */
+void f___rvv_float32mf2x8_t () {__rvv_float32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x8_t'} } */
 void f___rvv_float32m1x2_t () {__rvv_float32m1x2_t t;}
 void f___rvv_float32m1x3_t () {__rvv_float32m1x3_t t;}
 void f___rvv_float32m1x4_t () {__rvv_float32m1x4_t t;}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-18.c b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-18.c
index 402e8f6ba22..95b760fa4e6 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-18.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-18.c
@@ -1,20 +1,20 @@
 /* { dg-do compile } */
 /* { dg-options "-O3 -march=rv32gc_zve32x_zvl64b_zvfhmin -mabi=ilp32d" } */
 
-void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;}
-void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;}
-void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;}
-void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;}
-void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;}
-void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;} 
-void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;}
-void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;}
-void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;}
-void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;}
-void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;}
-void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;}
-void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;}
-void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;}
+void f___rvv_int8mf8x2_t () {__rvv_int8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x2_t'} } */
+void f___rvv_uint8mf8x2_t () {__rvv_uint8mf8x2_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x2_t'} } */
+void f___rvv_int8mf8x3_t () {__rvv_int8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x3_t'} } */
+void f___rvv_uint8mf8x3_t () {__rvv_uint8mf8x3_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x3_t'} } */
+void f___rvv_int8mf8x4_t () {__rvv_int8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x4_t'} } */
+void f___rvv_uint8mf8x4_t () {__rvv_uint8mf8x4_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x4_t'} } */
+void f___rvv_int8mf8x5_t () {__rvv_int8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x5_t'} } */
+void f___rvv_uint8mf8x5_t () {__rvv_uint8mf8x5_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x5_t'} } */
+void f___rvv_int8mf8x6_t () {__rvv_int8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x6_t'} } */
+void f___rvv_uint8mf8x6_t () {__rvv_uint8mf8x6_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x6_t'} } */
+void f___rvv_int8mf8x7_t () {__rvv_int8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x7_t'} } */
+void f___rvv_uint8mf8x7_t () {__rvv_uint8mf8x7_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x7_t'} } */
+void f___rvv_int8mf8x8_t () {__rvv_int8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_int8mf8x8_t'} } */
+void f___rvv_uint8mf8x8_t () {__rvv_uint8mf8x8_t t;} /* { dg-error {unknown type name '__rvv_uint8mf8x8_t'} } */
 void f___rvv_int8mf4x2_t () {__rvv_int8mf4x2_t t;}
 void f___rvv_uint8mf4x2_t () {__rvv_uint8mf4x2_t t;}
 void f___rvv_int8mf4x3_t () {__rvv_int8mf4x3_t t;}
@@ -65,20 +65,20 @@ void f___rvv_int8m2x4_t () {__rvv_int8m2x4_t t;}
 void f___rvv_uint8m2x4_t () {__rvv_uint8m2x4_t t;}
 void f___rvv_int8m4x2_t () {__rvv_int8m4x2_t t;}
 void f___rvv_uint8m4x2_t () {__rvv_uint8m4x2_t t;}
-void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;}
-void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;}
-void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;}
-void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;}
-void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;}
-void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;}
-void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;}
-void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;}
-void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;}
-void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;}
-void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;}
-void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;}
-void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;}
-void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;}
+void f___rvv_int16mf4x2_t () {__rvv_int16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x2_t'} } */
+void f___rvv_uint16mf4x2_t () {__rvv_uint16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x2_t'} } */
+void f___rvv_int16mf4x3_t () {__rvv_int16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x3_t'} } */
+void f___rvv_uint16mf4x3_t () {__rvv_uint16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x3_t'} } */
+void f___rvv_int16mf4x4_t () {__rvv_int16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x4_t'} } */
+void f___rvv_uint16mf4x4_t () {__rvv_uint16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x4_t'} } */
+void f___rvv_int16mf4x5_t () {__rvv_int16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x5_t'} } */
+void f___rvv_uint16mf4x5_t () {__rvv_uint16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x5_t'} } */
+void f___rvv_int16mf4x6_t () {__rvv_int16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x6_t'} } */
+void f___rvv_uint16mf4x6_t () {__rvv_uint16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x6_t'} } */
+void f___rvv_int16mf4x7_t () {__rvv_int16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x7_t'} } */
+void f___rvv_uint16mf4x7_t () {__rvv_uint16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x7_t'} } */
+void f___rvv_int16mf4x8_t () {__rvv_int16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_int16mf4x8_t'} } */
+void f___rvv_uint16mf4x8_t () {__rvv_uint16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_uint16mf4x8_t'} } */
 void f___rvv_int16mf2x2_t () {__rvv_int16mf2x2_t t;}
 void f___rvv_uint16mf2x2_t () {__rvv_uint16mf2x2_t t;}
 void f___rvv_int16mf2x3_t () {__rvv_int16mf2x3_t t;}
@@ -115,20 +115,20 @@ void f___rvv_int16m2x4_t () {__rvv_int16m2x4_t t;}
 void f___rvv_uint16m2x4_t () {__rvv_uint16m2x4_t t;}
 void f___rvv_int16m4x2_t () {__rvv_int16m4x2_t t;}
 void f___rvv_uint16m4x2_t () {__rvv_uint16m4x2_t t;}
-void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;}
-void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;}
-void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;}
-void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;}
-void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;}
-void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;}
-void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;}
-void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;}
-void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;}
-void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;}
-void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;}
-void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;}
-void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;}
-void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;}
+void f___rvv_int32mf2x2_t () {__rvv_int32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x2_t'} } */
+void f___rvv_uint32mf2x2_t () {__rvv_uint32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x2_t'} } */
+void f___rvv_int32mf2x3_t () {__rvv_int32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x3_t'} } */
+void f___rvv_uint32mf2x3_t () {__rvv_uint32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x3_t'} } */
+void f___rvv_int32mf2x4_t () {__rvv_int32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x4_t'} } */
+void f___rvv_uint32mf2x4_t () {__rvv_uint32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x4_t'} } */
+void f___rvv_int32mf2x5_t () {__rvv_int32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x5_t'} } */
+void f___rvv_uint32mf2x5_t () {__rvv_uint32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x5_t'} } */
+void f___rvv_int32mf2x6_t () {__rvv_int32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x6_t'} } */
+void f___rvv_uint32mf2x6_t () {__rvv_uint32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x6_t'} } */
+void f___rvv_int32mf2x7_t () {__rvv_int32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x7_t'} } */
+void f___rvv_uint32mf2x7_t () {__rvv_uint32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x7_t'} } */
+void f___rvv_int32mf2x8_t () {__rvv_int32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_int32mf2x8_t'} } */
+void f___rvv_uint32mf2x8_t () {__rvv_uint32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_uint32mf2x8_t'} } */
 void f___rvv_int32m1x2_t () {__rvv_int32m1x2_t t;}
 void f___rvv_uint32m1x2_t () {__rvv_uint32m1x2_t t;}
 void f___rvv_int32m1x3_t () {__rvv_int32m1x3_t t;}
@@ -173,13 +173,13 @@ void f___rvv_int64m2x4_t () {__rvv_int64m2x4_t t;} /* { dg-error {unknown type n
 void f___rvv_uint64m2x4_t () {__rvv_uint64m2x4_t t;} /* { dg-error {unknown type name '__rvv_uint64m2x4_t'} } */
 void f___rvv_int64m4x2_t () {__rvv_int64m4x2_t t;} /* { dg-error {unknown type name '__rvv_int64m4x2_t'} } */
 void f___rvv_uint64m4x2_t () {__rvv_uint64m4x2_t t;} /* { dg-error {unknown type name '__rvv_uint64m4x2_t'} } */
-void f___rvv_float16mf4x2_t () {__rvv_float16mf4x2_t t;}
-void f___rvv_float16mf4x3_t () {__rvv_float16mf4x3_t t;}
-void f___rvv_float16mf4x4_t () {__rvv_float16mf4x4_t t;}
-void f___rvv_float16mf4x5_t () {__rvv_float16mf4x5_t t;}
-void f___rvv_float16mf4x6_t () {__rvv_float16mf4x6_t t;}
-void f___rvv_float16mf4x7_t () {__rvv_float16mf4x7_t t;}
-void f___rvv_float16mf4x8_t () {__rvv_float16mf4x8_t t;}
+void f___rvv_float16mf4x2_t () {__rvv_float16mf4x2_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x2_t'} } */
+void f___rvv_float16mf4x3_t () {__rvv_float16mf4x3_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x3_t'} } */
+void f___rvv_float16mf4x4_t () {__rvv_float16mf4x4_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x4_t'} } */
+void f___rvv_float16mf4x5_t () {__rvv_float16mf4x5_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x5_t'} } */
+void f___rvv_float16mf4x6_t () {__rvv_float16mf4x6_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x6_t'} } */
+void f___rvv_float16mf4x7_t () {__rvv_float16mf4x7_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x7_t'} } */
+void f___rvv_float16mf4x8_t () {__rvv_float16mf4x8_t t;} /* { dg-error {unknown type name '__rvv_float16mf4x8_t'} } */
 void f___rvv_float16mf2x2_t () {__rvv_float16mf2x2_t t;}
 void f___rvv_float16mf2x3_t () {__rvv_float16mf2x3_t t;}
 void f___rvv_float16mf2x4_t () {__rvv_float16mf2x4_t t;}
@@ -198,13 +198,13 @@ void f___rvv_float16m2x2_t () {__rvv_float16m2x2_t t;}
 void f___rvv_float16m2x3_t () {__rvv_float16m2x3_t t;}
 void f___rvv_float16m2x4_t () {__rvv_float16m2x4_t t;}
 void f___rvv_float16m4x2_t () {__rvv_float16m4x2_t t;}
-void f___rvv_float32mf2x2_t () {__rvv_float32mf2x2_t t;}
-void f___rvv_float32mf2x3_t () {__rvv_float32mf2x3_t t;}
-void f___rvv_float32mf2x4_t () {__rvv_float32mf2x4_t t;}
-void f___rvv_float32mf2x5_t () {__rvv_float32mf2x5_t t;}
-void f___rvv_float32mf2x6_t () {__rvv_float32mf2x6_t t;}
-void f___rvv_float32mf2x7_t () {__rvv_float32mf2x7_t t;}
-void f___rvv_float32mf2x8_t () {__rvv_float32mf2x8_t t;}
+void f___rvv_float32mf2x2_t () {__rvv_float32mf2x2_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x2_t'} } */
+void f___rvv_float32mf2x3_t () {__rvv_float32mf2x3_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x3_t'} } */
+void f___rvv_float32mf2x4_t () {__rvv_float32mf2x4_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x4_t'} } */
+void f___rvv_float32mf2x5_t () {__rvv_float32mf2x5_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x5_t'} } */
+void f___rvv_float32mf2x6_t () {__rvv_float32mf2x6_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x6_t'} } */
+void f___rvv_float32mf2x7_t () {__rvv_float32mf2x7_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x7_t'} } */
+void f___rvv_float32mf2x8_t () {__rvv_float32mf2x8_t t;} /* { dg-error {unknown type name '__rvv_float32mf2x8_t'} } */
 void f___rvv_float32m1x2_t () {__rvv_float32m1x2_t t;}
 void f___rvv_float32m1x3_t () {__rvv_float32m1x3_t t;}
 void f___rvv_float32m1x4_t () {__rvv_float32m1x4_t t;}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsetvl_zve32-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsetvl_zve32-1.c
new file mode 100644
index 00000000000..f6899c3bc2f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsetvl_zve32-1.c
@@ -0,0 +1,73 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32imafc_zve32f_zvl128b -mabi=ilp32 -O2" } */
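+
+/* The aggregate copies below should be vectorized with e32,m1: with ELEN=32,
+   a fractional LMUL such as mf2 would violate LMUL >= SEW/ELEN, which the
+   scan-assembler checks at the end of this file verify.  */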
+
+struct S0
+{
+  unsigned a : 15;
+  int b;
+  int c;
+};
+
+struct S1
+{
+  struct S0 s0;
+  int e;
+};
+
+struct Z
+{
+  char c;
+  int z;
+} __attribute__((packed));
+
+union U
+{
+  struct S1 s1;
+  struct Z z;
+};
+
+int __attribute__((noinline, noclone))
+return_zero (void)
+{
+  return 0;
+}
+
+volatile union U gu;
+struct S0 gs;
+
+void __attribute__((noinline, noclone))
+check_outcome ()
+{
+  if (gs.a != 6
+      || gs.b != 80000)
+    __builtin_abort ();
+}
+
+int
+main (int argc, char *argv[])
+{
+  union U u;
+  struct S1 m;
+  struct S0 l;
+
+  if (return_zero ())
+    u.z.z = 20000;
+  else
+    {
+      u.s1.s0.a = 6;
+      u.s1.s0.b = 80000;
+      u.s1.e = 2;
+
+      m = u.s1;
+      m.s0.c = 0;
+      l = m.s0;
+      gs = l;
+    }
+
+  gu = u;
+  check_outcome ();
+  return 0;
+}
+
+/* { dg-final { scan-assembler {vsetivli\s+zero,\s*2,\s*e32,\s*m1,\s*t[au],\s*m[au]} } } */
+/* { dg-final { scan-assembler {vsetivli\s+zero,\s*4,\s*e32,\s*m1,\s*t[au],\s*m[au]} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsetvl_zve32-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsetvl_zve32-2.c
new file mode 100644
index 00000000000..dd81f8b3410
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsetvl_zve32-2.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64g_zve32x_zvl128b -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+typedef unsigned int V2SI __attribute__((vector_size(8)));
+
+V2SI v1, v2;
+
+/* Make sure we do not use the mf2 mode, even though a vector register could
+   hold V2SI: with ELEN=32, LMUL must satisfy LMUL >= SEW/ELEN = 1.  */
+void foo1()
+{
+/*
+** foo1:
+**      ...
+**      vsetivli       zero,2,e32,m1,ta,ma
+**      ...
+**      vle32\.v       v[0-9]+,0\([a-x][0-9]+\)
+**      ...
+**      vse32\.v       v[0-9]+,0\([a-x][0-9]+\)
+**      ...
+**      ret
+*/
+  v1 = v2;
+}
-- 
2.34.1

