eopXD created this revision.
eopXD added reviewers: craig.topper, kito-cheng, rogfer01, frasercrmck.
Herald added subscribers: jobnoorman, luke, VincentWu, vkmr, luismarques, apazos, sameer.abuasal, s.egerton, Jim, benna, psnobl, jocewei, PkmX, the_o, brucehoult, MartinMosbeck, edward-jones, zzheng, jrtc27, shiva0217, niosHD, sabuasal, simoncook, johnrusso, rbar, asb, arichardson.
Herald added a project: All.
eopXD requested review of this revision.
Herald added subscribers: cfe-commits, pcwang-thead, MaskRay.
Herald added a project: clang.
This patch expands all variants of the unit-stride segment store intrinsics. The store intrinsics do not have any policy variants. This patch also fixes the trailing suffix in the intrinsics' function names, which represents the tuple type, by adding `x{NF}`. For the same reason mentioned in [2/N], a full set of test cases is added only for vsseg2e32.

Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D152072

Files:
  clang/include/clang/Basic/riscv_vector.td
  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e32_tuple.c
  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e32_tuple.c
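Before the full diff, a minimal illustration (not part of the patch itself) of what the renaming means at a call site; the function names are taken from the test cases below, where the non-overloaded suffix now spells out the full tuple type (e.g. i32m1x2 instead of i32m1):

  #include <riscv_vector.h>

  // Non-overloaded form: the suffix matches the vint32m1x2_t tuple operand.
  void store_pair(int32_t *base, vint32m1x2_t v_tuple, size_t vl) {
    __riscv_vsseg2e32_v_tuple_i32m1x2(base, v_tuple, vl);
  }

  // Overloaded form: the tuple type is inferred from the argument, so the
  // spelling __riscv_vsseg2e32_tuple is unaffected by the suffix change.
  void store_pair_overloaded(int32_t *base, vint32m1x2_t v_tuple, size_t vl) {
    __riscv_vsseg2e32_tuple(base, v_tuple, vl);
  }
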
Index: clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e32_tuple.c =================================================================== --- /dev/null +++ clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsseg2e32_tuple.c @@ -0,0 +1,345 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1f32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32mf2x2(float *base, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2f32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32m1x2(float *base, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4f32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32m2x2(float *base, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8f32.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32m4x2(float *base, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32mf2x2(int32_t *base, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], 
<vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32m1x2(int32_t *base, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32m2x2(int32_t *base, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32m4x2(int32_t *base, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32mf2x2(uint32_t *base, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32m1x2 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32m1x2(uint32_t *base, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32m2x2(uint32_t *base, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32m4x2(uint32_t *base, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x 
float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32mf2x2_m(vbool64_t mask, float *base, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2f32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32m1x2_m(vbool32_t mask, float *base, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4f32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32m2x2_m(vbool16_t mask, float *base, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8f32.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32m4x2_m(vbool8_t mask, float *base, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_tuple(mask, base, v_tuple, vl); +} + Index: clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e32_tuple.c =================================================================== --- clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e32_tuple.c +++ 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e32_tuple.c @@ -4,10 +4,81 @@ // RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s + #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_tuple_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1f32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32mf2x2(float *base, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_f32mf2x2(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2f32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32m1x2(float *base, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_f32m1x2(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale 
x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4f32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32m2x2(float *base, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_f32m2x2(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8f32.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32m4x2(float *base, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_f32m4x2(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32mf2x2(int32_t *base, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_i32mf2x2(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 // CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 @@ -16,16 +87,259 @@ // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_tuple_i32m1(int32_t *base, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32_v_tuple_i32m1(base, v_tuple, vl); +void test_vsseg2e32_v_i32m1x2(int32_t *base, vint32m1x2_t 
v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_i32m1x2(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32m2x2(int32_t *base, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_i32m2x2(base, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m1_m -// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsseg2e32_v_i32m1_m(mask, base, v0, v1, vl); +void test_vsseg2e32_v_i32m4x2(int32_t *base, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_i32m4x2(base, v_tuple, vl); } + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32mf2x2(uint32_t *base, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_u32mf2x2(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32m1x2(uint32_t *base, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_u32m1x2(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32m2x2(uint32_t *base, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_u32m2x2(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void 
test_vsseg2e32_v_u32m4x2(uint32_t *base, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_u32m4x2(base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32mf2x2_m(vbool64_t mask, float *base, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_f32mf2x2_m(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2f32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32m1x2_m(vbool32_t mask, float *base, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_f32m1x2_m(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4f32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], <vscale x 4 x 
i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32m2x2_m(vbool16_t mask, float *base, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_f32m2x2_m(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_f32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8f32.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32m4x2_m(vbool8_t mask, float *base, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_f32m4x2_m(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_i32mf2x2_m(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> 
[[TMP3]], ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_i32m1x2_m(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_i32m2x2_m(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_i32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_i32m4x2_m(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32mf2x2_m +// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> 
[[TMP3]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_u32mf2x2_m(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32m1x2_m +// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_u32m1x2_m(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32m2x2_m +// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsseg2e32_v_tuple_u32m2x2_m(mask, base, v_tuple, vl); +} + +// CHECK-RV64-LABEL: define dso_local void @test_vsseg2e32_v_u32m4x2_m +// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 
8 x i32> [[TMP3]], ptr [[BASE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg2e32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsseg2e32_v_tuple_u32m4x2_m(mask, base, v_tuple, vl);
+}
+
Index: clang/include/clang/Basic/riscv_vector.td
===================================================================
--- clang/include/clang/Basic/riscv_vector.td
+++ clang/include/clang/Basic/riscv_vector.td
@@ -1565,9 +1565,15 @@
 }

 multiclass RVVUnitStridedSegStoreTuple<string op> {
-  foreach type = ["i"] in {
-    defvar eew = !cond(!eq(type, "i") : "32");
-    foreach nf = [2] in {
+  foreach type = TypeList in {
+    defvar eew = !cond(!eq(type, "c") : "8",
+                       !eq(type, "s") : "16",
+                       !eq(type, "i") : "32",
+                       !eq(type, "l") : "64",
+                       !eq(type, "x") : "16",
+                       !eq(type, "f") : "32",
+                       !eq(type, "d") : "64");
+    foreach nf = NFList in {
       let Name = op # nf # "e" # eew # "_v_tuple",
           OverloadedName = op # nf # "e" # eew # "_tuple",
           IRName = op # nf,
@@ -1600,8 +1606,11 @@
         return Builder.CreateCall(F, Operands, "");
       }
     }] in {
-      defvar T = "(Tuple:" # nf # ")";
-      def : RVVBuiltin<"v", "0Pe" # T # "v", type>;
+      defvar T = "(Tuple:" # nf # ")";
+      def : RVVBuiltin<T # "v", "0Pe" # T # "v", type>;
+      if !not(IsFloat<type>.val) then {
+        def : RVVBuiltin<T # "Uv", "0PUe" # T # "Uv", type>;
+      }
     }
   }
 }
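As a reading aid (not part of the patch): iterating `type` over TypeList and `nf` over NFList makes this multiclass cover every element width and segment count, and the extra RVVBuiltin guarded by `!not(IsFloat<type>.val)` adds the unsigned-integer tuple variants. Using names that appear in the tests above, the masked unsigned form then looks like:

  #include <riscv_vector.h>

  // Masked, non-overloaded unsigned variant produced by the "Uv" builtin above;
  // vbool32_t is the mask type matching a u32m1x2 tuple (SEW=32, LMUL=1).
  void store_pair_masked(vbool32_t mask, uint32_t *base, vuint32m1x2_t v_tuple,
                         size_t vl) {
    __riscv_vsseg2e32_v_tuple_u32m1x2_m(mask, base, v_tuple, vl);
  }
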
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits