From: Liao Shihua <shi...@iscas.ac.cn>

This commit adds testcases for Xsfvcp.

Co-authored-by: Jiawei Chen <jia...@iscas.ac.cn>
Co-authored-by: Shihua Liao <shi...@iscas.ac.cn>
Co-authored-by: Yixuan Chen <chenyix...@iscas.ac.cn>

gcc/testsuite/ChangeLog:

        * gcc.target/riscv/rvv/xsfvector/sf_vc_f.c: New test.
        * gcc.target/riscv/rvv/xsfvector/sf_vc_i.c: New test.
        * gcc.target/riscv/rvv/xsfvector/sf_vc_v.c: New test.
        * gcc.target/riscv/rvv/xsfvector/sf_vc_x.c: New test.
---
 .../gcc.target/riscv/rvv/xsfvector/sf_vc_f.c  |  88 +++++++++++
 .../gcc.target/riscv/rvv/xsfvector/sf_vc_i.c  | 132 +++++++++++++++++
 .../gcc.target/riscv/rvv/xsfvector/sf_vc_v.c  | 107 ++++++++++++++
 .../gcc.target/riscv/rvv/xsfvector/sf_vc_x.c  | 138 ++++++++++++++++++
 4 files changed, 465 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_f.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_i.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_v.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_x.c

diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_f.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_f.c
new file mode 100644
index 00000000000..fde70a80e2e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_f.c
@@ -0,0 +1,88 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_xsfvcp -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "riscv_vector.h"
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+
+/*
+** test_sf_vc_v_fv_u16mf4:
+** ...
+** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+
+** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+vuint16mf4_t test_sf_vc_v_fv_u16mf4(vuint16mf4_t vs2, float16_t fs1, size_t vl) {
+    return __riscv_sf_vc_v_fv_u16mf4(1, vs2, fs1, vl);
+}
+
+/*
+** test_sf_vc_v_fv_se_u16mf4:
+** ...
+** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+
+** sf\.vc\.v\.fv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+vuint16mf4_t test_sf_vc_v_fv_se_u16mf4(vuint16mf4_t vs2, float16_t fs1, size_t vl) {
+    return __riscv_sf_vc_v_fv_se_u16mf4(1, vs2, fs1, vl);
+}
+
+/*
+** test_sf_vc_fv_se_u16mf2:
+** ...
+** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+
+** sf\.vc\.fv\t[0-9]+,[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+void test_sf_vc_fv_se_u16mf2(vuint16mf2_t vs2, float16_t fs1, size_t vl) {
+    __riscv_sf_vc_fv_se_u16mf2(1, 3, vs2, fs1, vl);
+}
+
+/*
+** test_sf_vc_v_fvv_u16m1:
+** ...
+** vsetivli\s+zero+,0+,e16+,m1,ta,ma+
+** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+vuint16m1_t test_sf_vc_v_fvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, float16_t fs1, size_t vl) {
+    return __riscv_sf_vc_v_fvv_u16m1(1, vd, vs2, fs1, vl);
+}
+
+/*
+** test_sf_vc_v_fvv_se_u16m1:
+** ...
+** vsetivli\s+zero+,0+,e16+,m1,ta,ma+
+** sf\.vc\.v\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+vuint16m1_t test_sf_vc_v_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, float16_t fs1, size_t vl) {
+    return __riscv_sf_vc_v_fvv_se_u16m1(1, vd, vs2, fs1, vl);
+}
+
+/*
+** test_sf_vc_fvv_se_u32m8:
+** ...
+** vsetivli\s+zero+,0+,e32+,m8,ta,ma+
+** sf\.vc\.fvv\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+void test_sf_vc_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float32_t fs1, size_t vl) {
+    __riscv_sf_vc_fvv_se_u32m8(1, vd, vs2, fs1, vl);
+}
+
+
+/*
+** test_sf_vc_fvw_se_u32m2:
+** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
+** sf\.vc\.fvw\t[0-9]+,v[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+void test_sf_vc_fvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float32_t fs1, size_t vl) {
+    __riscv_sf_vc_fvw_se_u32m2(1, vd, vs2, fs1, vl);
+}
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_i.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_i.c
new file mode 100644
index 00000000000..bd2e85ee4e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_i.c
@@ -0,0 +1,132 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_xsfvcp -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "riscv_vector.h"
+
+
+/*
+** test_sf_vc_v_i_u16m4:
+** ...
+** vsetivli\s+zero+,0+,e16+,m4,ta,ma+
+** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint16m4_t test_sf_vc_v_i_u16m4(size_t vl) {
+    return __riscv_sf_vc_v_i_u16m4(1, 2, 4, vl);
+}
+
+/*
+** test_sf_vc_v_i_se_u16m4:
+** ...
+** vsetivli\s+zero+,0+,e16+,m4,ta,ma+
+** sf\.vc\.v\.i\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint16m4_t test_sf_vc_v_i_se_u16m4(size_t vl) {
+    return __riscv_sf_vc_v_i_se_u16m4(1, 2, 4, vl);
+}
+
+/*
+** test_sf_vc_i_se_u16mf4:
+** ...
+** vsetivli\s+zero+,0+,e16+,mf4,ta,ma+
+** sf\.vc\.i\t[0-9]+,[0-9]+,[0-9]+,[0-9]+
+** ...
+*/
+void test_sf_vc_i_se_u16mf4(size_t vl) {
+    __riscv_sf_vc_i_se_u16mf4(1, 2, 3, 4, vl);
+}
+
+/*
+** test_sf_vc_v_iv_u32m2:
+** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
+** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint32m2_t test_sf_vc_v_iv_u32m2(vuint32m2_t vs2, size_t vl) {
+    return __riscv_sf_vc_v_iv_u32m2(1, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_v_iv_se_u32m2:
+** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
+** sf\.vc\.v\.iv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint32m2_t test_sf_vc_v_iv_se_u32m2(vuint32m2_t vs2, size_t vl) {
+    return __riscv_sf_vc_v_iv_se_u32m2(1, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_iv_se_u16m2:
+** ...
+** vsetivli\s+zero+,0+,e16+,m2,ta,ma+
+** sf\.vc\.iv\t[0-9]+,[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+void test_sf_vc_iv_se_u16m2(vuint16m2_t vs2, size_t vl) {
+    __riscv_sf_vc_iv_se_u16m2(1, 3, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_v_ivv_u8m8:
+** ...
+** vsetivli\s+zero+,0+,e8+,m8,ta,ma+
+** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint8m8_t test_sf_vc_v_ivv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+    return __riscv_sf_vc_v_ivv_u8m8(1, vd, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_v_ivv_se_u8m8:
+** ...
+** vsetivli\s+zero+,0+,e8+,m8,ta,ma+
+** sf\.vc\.v\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint8m8_t test_sf_vc_v_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+    return __riscv_sf_vc_v_ivv_se_u8m8(1, vd, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_ivv_se_u64m1:
+** ...
+** vsetivli\s+zero+,0+,e64+,m1,ta,ma+
+** sf\.vc\.ivv\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+void test_sf_vc_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
+    __riscv_sf_vc_ivv_se_u64m1(1, vd, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_v_ivw_u8mf4:
+** ...
+** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+
+** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint16mf2_t test_sf_vc_v_ivw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
+    return __riscv_sf_vc_v_ivw_u8mf4(1, vd, vs2, 4, vl);
+}
+
+/*
+** test_sf_vc_v_ivw_se_u8mf4:
+** ...
+** vsetivli\s+zero+,0+,e8+,mf4,ta,ma+
+** sf\.vc\.v\.ivw\t[0-9]+,v[0-9]+,v[0-9]+,[0-9]+
+** ...
+*/
+vuint16mf2_t test_sf_vc_v_ivw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
+    return __riscv_sf_vc_v_ivw_se_u8mf4(1, vd, vs2, 4, vl);
+}
+
+void test_sf_vc_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
+    __riscv_sf_vc_ivw_se_u32m4(1, vd, vs2, 4, vl);
+}
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_v.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_v.c
new file mode 100644
index 00000000000..a98b01e4875
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_v.c
@@ -0,0 +1,107 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_xsfvcp -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "riscv_vector.h"
+
+
+/*
+** test_sf_vc_v_vv_u8mf8:
+** ...
+** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+
+** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vuint8mf8_t test_sf_vc_v_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) {
+    return __riscv_sf_vc_v_vv_u8mf8(1, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_v_vv_se_u8mf8:
+** ...
+** vsetivli\s+zero+,0+,e8+,mf8,ta,ma+
+** sf\.vc\.v\.vv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vuint8mf8_t test_sf_vc_v_vv_se_u8mf8(vuint8mf8_t vs2, vuint8mf8_t rs1, size_t vl) {
+    return __riscv_sf_vc_v_vv_se_u8mf8(1, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_vv_se_u16m1:
+** ...
+** vsetivli\s+zero+,0+,e16+,m1,ta,ma+
+** sf\.vc\.vv\t[0-9]+,[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+void test_sf_vc_vv_se_u16m1(vuint16m1_t vs2, vuint16m1_t rs1, size_t vl) {
+    __riscv_sf_vc_vv_se_u16m1(1, 3, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_v_vvv_u32mf2:
+** ...
+** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+
+** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vuint32mf2_t test_sf_vc_v_vvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) {
+    return __riscv_sf_vc_v_vvv_u32mf2(1, vd, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_v_vvv_se_u32mf2:
+** ...
+** vsetivli\s+zero+,0+,e32+,mf2,ta,ma+
+** sf\.vc\.v\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vuint32mf2_t test_sf_vc_v_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t rs1, size_t vl) {
+    return __riscv_sf_vc_v_vvv_se_u32mf2(1, vd, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_vvv_se_u64m1:
+** ...
+** vsetivli\s+zero+,0+,e64+,m1,ta,ma+
+** sf\.vc\.vvv\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+void test_sf_vc_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t rs1, size_t vl) {
+    __riscv_sf_vc_vvv_se_u64m1(1, vd, vs2, rs1, vl);
+}
+
+
+/*
+** test_sf_vc_v_vvw_u8m1:
+** ...
+** vsetivli\s+zero+,0+,e8+,m1,ta,ma+
+** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vuint16m2_t test_sf_vc_v_vvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) {
+    return __riscv_sf_vc_v_vvw_u8m1(1, vd, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_v_vvw_se_u8m1:
+** ...
+** vsetivli\s+zero+,0+,e8+,m1,ta,ma+
+** sf\.vc\.v\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vuint16m2_t test_sf_vc_v_vvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t rs1, size_t vl) {
+    return __riscv_sf_vc_v_vvw_se_u8m1(1, vd, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vc_vvw_se_u16mf2:
+** ...
+** vsetivli\s+zero+,0+,e16+,mf2,ta,ma+
+** sf\.vc\.vvw\t[0-9]+,v[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+void test_sf_vc_vvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t rs1, size_t vl) {
+    __riscv_sf_vc_vvw_se_u16mf2(1, vd, vs2, rs1, vl);
+}
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_x.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_x.c
new file mode 100644
index 00000000000..0d33d9839c6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vc_x.c
@@ -0,0 +1,138 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_xsfvcp -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "riscv_vector.h"
+
+/*
+** test_sf_vc_v_x_u32m1:
+** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
+** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint32m1_t test_sf_vc_v_x_u32m1(uint32_t xs1, size_t vl) {
+    return __riscv_sf_vc_v_x_u32m1(1, 2, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_x_se_u32m1:
+** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
+** sf\.vc\.v\.x\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint32m1_t test_sf_vc_v_x_se_u32m1(uint32_t xs1, size_t vl) {
+    return __riscv_sf_vc_v_x_se_u32m1(1, 2, xs1, vl);
+}
+
+/*
+** test_sf_vc_x_se_u16m8:
+** ...
+** vsetivli\s+zero+,0+,e16+,m8,ta,ma+
+** sf\.vc\.x\t[0-9]+,[0-9]+,[0-9]+,a[0-9]+
+** ...
+*/
+void test_sf_vc_x_se_u16m8(uint16_t xs1, size_t vl) {
+    __riscv_sf_vc_x_se_u16m8(1, 2, 3, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_xv_u32m2:
+** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
+** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint32m2_t test_sf_vc_v_xv_u32m2(vuint32m2_t vs2, uint32_t xs1, size_t vl) {
+    return __riscv_sf_vc_v_xv_u32m2(1, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_xv_se_u32m2:
+** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
+** sf\.vc\.v\.xv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint32m2_t test_sf_vc_v_xv_se_u32m2(vuint32m2_t vs2, uint32_t xs1, size_t vl) {
+    return __riscv_sf_vc_v_xv_se_u32m2(1, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_xv_se_u16m4:
+** ...
+** vsetivli\s+zero+,0+,e16+,m4,ta,ma+
+** sf\.vc\.xv\t[0-9]+,[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+void test_sf_vc_xv_se_u16m4(vuint16m4_t vs2, uint16_t xs1, size_t vl) {
+    __riscv_sf_vc_xv_se_u16m4(1, 3, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_xvv_u16m1:
+** ...
+** vsetivli\s+zero+,0+,e16+,m1,ta,ma+
+** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint16m1_t test_sf_vc_v_xvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t xs1, size_t vl) {
+    return __riscv_sf_vc_v_xvv_u16m1(1, vd, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_xvv_se_u16m1:
+** ...
+** vsetivli\s+zero+,0+,e16+,m1,ta,ma+
+** sf\.vc\.v\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint16m1_t test_sf_vc_v_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t xs1, size_t vl) {
+    return __riscv_sf_vc_v_xvv_se_u16m1(1, vd, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_xvv_se_u32m2:
+** ...
+** vsetivli\s+zero+,0+,e32+,m2,ta,ma+
+** sf\.vc\.xvv\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+void test_sf_vc_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t xs1, size_t vl) {
+    __riscv_sf_vc_xvv_se_u32m2(1, vd, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_xvw_u32m1:
+** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
+** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint64m2_t test_sf_vc_v_xvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t xs1, size_t vl) {
+    return __riscv_sf_vc_v_xvw_u32m1(1, vd, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_v_xvw_se_u32m1:
+** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
+** sf\.vc\.v\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+vuint64m2_t test_sf_vc_v_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t xs1, size_t vl) {
+    return __riscv_sf_vc_v_xvw_se_u32m1(1, vd, vs2, xs1, vl);
+}
+
+/*
+** test_sf_vc_xvw_se_u32m1:
+** ...
+** vsetivli\s+zero+,0+,e32+,m1,ta,ma+
+** sf\.vc\.xvw\t[0-9]+,v[0-9]+,v[0-9]+,a[0-9]+
+** ...
+*/
+void test_sf_vc_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t xs1, size_t vl) {
+    __riscv_sf_vc_xvw_se_u32m1(1, vd, vs2, xs1, vl);
+}
+
-- 
2.34.1

Reply via email to