eopXD created this revision.
eopXD added reviewers: craig.topper, kito-cheng, rogfer01, frasercrmck.
Herald added subscribers: jobnoorman, luke, VincentWu, vkmr, luismarques, apazos, sameer.abuasal, s.egerton, Jim, benna, psnobl, jocewei, PkmX, the_o, brucehoult, MartinMosbeck, edward-jones, zzheng, jrtc27, shiva0217, niosHD, sabuasal, simoncook, johnrusso, rbar, asb, arichardson.
Herald added a project: All.
eopXD requested review of this revision.
Herald added subscribers: cfe-commits, pcwang-thead, MaskRay.
Herald added a project: clang.
This patch expands all variants for the strided segment store intrinsics. The store intrinsics do not have any policy variants. This patch also fixes the trailing suffix in the intrinsic function names that represents the tuple type, adding `x{NF}`. For the same reason mentioned in [2/N], only the full test case for vssseg2e32 is added for now.

Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D152075

Files:
  clang/include/clang/Basic/riscv_vector.td
  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32_tuple.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32_tuple.c
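For reference, a minimal usage sketch of the renamed intrinsics, mirroring the vssseg2e32 test cases below; the function names are taken directly from those tests, and other element widths are assumed to follow the same `x{NF}` suffix pattern:

  #include <riscv_vector.h>

  // Non-overloaded: the suffix now spells out the tuple type, e.g. i32m1x2.
  void store_i32m1x2(int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) {
    __riscv_vssseg2e32_v_tuple_i32m1x2(base, bstride, v_tuple, vl);
  }

  // Overloaded: the generic _tuple name resolves on the tuple argument type.
  void store_i32m1x2_overloaded(int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) {
    __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl);
  }

  // Masked form, as in the _m test cases (the mask is the first argument).
  void store_i32m1x2_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) {
    __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl);
  }
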
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32_tuple.c =================================================================== --- /dev/null +++ clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg2e32_tuple.c @@ -0,0 +1,345 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32mf2x2(float *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32m1x2(float *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32m2x2(float *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f32.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32m4x2(float *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32mf2x2(int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32m1x2(int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32m2x2(int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32m4x2(int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, 
<vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32mf2x2(uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32m1x2(uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32m2x2(uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32m4x2(uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32mf2x2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32m1x2_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { 
<vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32m2x2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f32.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32m4x2_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32m1x2_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32m2x2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32m4x2_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) { + 
return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 
[[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_tuple(mask, base, bstride, v_tuple, vl); +} + Index: clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32_tuple.c =================================================================== --- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32_tuple.c +++ clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32_tuple.c @@ -7,8 +7,78 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_tuple_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32mf2x2(float *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_f32mf2x2(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32m1x2(float *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_f32m1x2(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32m2x2(float *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_f32m2x2(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f32.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32m4x2(float *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_f32m4x2(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg2e32_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32mf2x2(int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_i32mf2x2(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 // CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 @@ -17,11 +87,165 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_tuple_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_tuple_i32m1(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_i32m1x2(int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_i32m1x2(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32m2x2(int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_i32m2x2(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4x2 +// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32m4x2(int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_i32m4x2(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32mf2x2(uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_u32mf2x2(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32m1x2(uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_u32m1x2(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32m2x2(uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_u32m2x2(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32m4x2(uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_u32m4x2(base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32mf2x2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_f32mf2x2_m(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void 
@test_vssseg2e32_v_f32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32m1x2_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_f32m1x2_m(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32m2x2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_f32m2x2_m(mask, base, bstride, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_tuple_i32m1_m +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_f32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f32.i64(<vscale x 8 x float> 
[[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_f32m4x2_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_f32m4x2_m(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_i32mf2x2_m(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m1x2_m // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 @@ -31,6 +255,91 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_tuple_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_tuple_i32m1_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_i32m1x2_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_i32m1x2_m(mask, base, bstride, v_tuple, vl); } + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x 
i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32m2x2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_i32m2x2_m(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_i32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_i32m4x2_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_i32m4x2_m(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_u32mf2x2_m(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } 
[[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_u32m1x2_m(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_u32m2x2_m(mask, base, bstride, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vssseg2e32_v_u32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vssseg2e32_v_tuple_u32m4x2_m(mask, base, bstride, v_tuple, vl); +} + Index: clang/include/clang/Basic/riscv_vector.td =================================================================== --- clang/include/clang/Basic/riscv_vector.td +++ clang/include/clang/Basic/riscv_vector.td @@ -1752,9 +1752,15 @@ } multiclass RVVStridedSegStoreTuple<string op> { - foreach type = ["i"] in { - defvar eew = 
!cond(!eq(type, "i") : "32"); - foreach nf = [2] in { + foreach type = TypeList in { + defvar eew = !cond(!eq(type, "c") : "8", + !eq(type, "s") : "16", + !eq(type, "i") : "32", + !eq(type, "l") : "64", + !eq(type, "x") : "16", + !eq(type, "f") : "32", + !eq(type, "d") : "64"); + foreach nf = NFList in { let Name = op # nf # "e" # eew # "_v_tuple", OverloadedName = op # nf # "e" # eew # "_tuple", IRName = op # nf, @@ -1789,8 +1795,11 @@ return Builder.CreateCall(F, Operands, ""); } }] in { - defvar T = "(Tuple:" # nf # ")"; - def : RVVBuiltin<"v", "0Pet" # T # "v", type>; + defvar T = "(Tuple:" # nf # ")"; + def : RVVBuiltin<T # "v", "0Pet" # T # "v", type>; + if !not(IsFloat<type>.val) then { + def : RVVBuiltin<T # "Uv", "0PUet" # T # "Uv", type>; + } } } }
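
The added `RVVBuiltin<T # "Uv", ...>` declaration in the hunk above is what generates the unsigned-element tuple builtins; a minimal sketch of one of them follows (the u32 name is confirmed by the tests in this patch, while names for the other type characters are assumed to follow the eew mapping in the `!cond` above):

  #include <riscv_vector.h>

  // Unsigned tuple variant enabled by the new "Uv" builtin declaration.
  void store_u32m2x2(uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) {
    __riscv_vssseg2e32_v_tuple_u32m2x2(base, bstride, v_tuple, vl);
  }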