Author: Francesco Petrogalli
Date: 2020-06-15T16:52:36Z
New Revision: 017969de766287ec6c2fc82128c62d1d1dad7bd8
URL: https://github.com/llvm/llvm-project/commit/017969de766287ec6c2fc82128c62d1d1dad7bd8
DIFF: https://github.com/llvm/llvm-project/commit/017969de766287ec6c2fc82128c62d1d1dad7bd8.diff

LOG: [llvm][SveEmitter] SVE ACLE for quadword permute intrinsics.

Summary:
The following intrinsics have been added, guarded by the macro
`__ARM_FEATURE_SVE_MATMUL_FP64`:

* svtrn1q[_*]
* svtrn2q[_*]
* svuzp1q[_*]
* svuzp2q[_*]
* svzip1q[_*]
* svzip2q[_*]

Supported types:

* svint[8|16|32|64]_t
* svuint[8|16|32|64]_t
* svfloat[16|32|64]_t

TODO: add support for svbfloat16_t

Reviewers: efriedma, sdesmalen, kmclaughlin, rengolin

Reviewed By: sdesmalen

Subscribers: tschuett, kristof.beyls, cfe-commits

Tags: #clang

Differential Revision: https://reviews.llvm.org/D80851

Added:
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn1-fp64.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn2-fp64.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp1-fp64.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp2-fp64.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip1-fp64.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip2-fp64.c

Modified:
    clang/include/clang/Basic/arm_sve.td

Removed:



################################################################################
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index b79a245ebd04..8c6abb1c3f4f 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -1264,6 +1264,12 @@ def SVMLLA_F32 : SInst<"svmmla[_f32]", "dddd","f", MergeNone, "aarch64_sve_fmmla
 
 let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP64)" in {
 def SVMLLA_F64 : SInst<"svmmla[_f64]", "dddd","d", MergeNone, "aarch64_sve_fmmla">;
+def SVTRN1Q : SInst<"svtrn1q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn1q">;
+def SVTRN2Q : SInst<"svtrn2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn2q">;
+def SVUZP1Q : SInst<"svuzp1q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp1q">;
+def SVUZP2Q : SInst<"svuzp2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp2q">;
+def SVZIP1Q : SInst<"svzip1q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip1q">;
+def SVZIP2Q : SInst<"svzip2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip2q">;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn1-fp64.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn1-fp64.c
new file mode 100644
index 000000000000..f968c1524a8b
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn1-fp64.c
@@ -0,0 +1,88 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE_MATMUL_FP64 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE_MATMUL_FP64 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svtrn1_s8(svint8_t op1, svint8_t op2) {
+  // CHECK-LABEL: test_svtrn1_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.trn1q.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn1q, _s8, , )(op1, op2);
+}
+
+svint16_t test_svtrn1_s16(svint16_t op1, svint16_t op2) {
+  // CHECK-LABEL: test_svtrn1_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.trn1q.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn1q, _s16, , )(op1, op2);
+}
+
+svint32_t test_svtrn1_s32(svint32_t op1, svint32_t op2) {
+  // CHECK-LABEL: test_svtrn1_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.trn1q.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn1q, _s32, , )(op1, op2);
+}
+
+svint64_t test_svtrn1_s64(svint64_t op1, svint64_t op2) {
+  // CHECK-LABEL: test_svtrn1_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.trn1q.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn1q, _s64, , )(op1, op2);
+}
+
+svuint8_t test_svtrn1_u8(svuint8_t op1, svuint8_t op2) {
+  // CHECK-LABEL: test_svtrn1_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.trn1q.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn1q, _u8, , )(op1, op2);
+}
+
+svuint16_t test_svtrn1_u16(svuint16_t op1, svuint16_t op2) {
+  // CHECK-LABEL: test_svtrn1_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.trn1q.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn1q, _u16, , )(op1, op2);
+}
+
+svuint32_t test_svtrn1_u32(svuint32_t op1, svuint32_t op2) {
+  // CHECK-LABEL: test_svtrn1_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.trn1q.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn1q, _u32, , )(op1, op2);
+}
+
+svuint64_t test_svtrn1_u64(svuint64_t op1, svuint64_t op2) {
+  // CHECK-LABEL: test_svtrn1_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.trn1q.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn1q, _u64, , )(op1, op2);
+}
+
+svfloat16_t test_svtrn1_f16(svfloat16_t op1, svfloat16_t op2) {
+  // CHECK-LABEL: test_svtrn1_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.trn1q.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn1q, _f16, , )(op1, op2);
+}
+
+svfloat32_t test_svtrn1_f32(svfloat32_t op1, svfloat32_t op2) {
+  // CHECK-LABEL: test_svtrn1_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.trn1q.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn1q, _f32, , )(op1, op2);
+}
+
+svfloat64_t test_svtrn1_f64(svfloat64_t op1, svfloat64_t op2) {
+  // CHECK-LABEL: test_svtrn1_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.trn1q.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn1q, _f64, , )(op1, op2);
+}
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn2-fp64.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn2-fp64.c
new file mode 100644
index 000000000000..fc26ec2b5658
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn2-fp64.c
@@ -0,0 +1,88 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE_MATMUL_FP64 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE_MATMUL_FP64 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svtrn2_s8(svint8_t op1, svint8_t op2) {
+  // CHECK-LABEL: test_svtrn2_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.trn2q.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn2q, _s8, , )(op1, op2);
+}
+
+svint16_t test_svtrn2_s16(svint16_t op1, svint16_t op2) {
+  // CHECK-LABEL: test_svtrn2_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.trn2q.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn2q, _s16, , )(op1, op2);
+}
+
+svint32_t test_svtrn2_s32(svint32_t op1, svint32_t op2) {
+  // CHECK-LABEL: test_svtrn2_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.trn2q.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn2q, _s32, , )(op1, op2);
+}
+
+svint64_t test_svtrn2_s64(svint64_t op1, svint64_t op2) {
+  // CHECK-LABEL: test_svtrn2_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.trn2q.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn2q, _s64, , )(op1, op2);
+}
+
+svuint8_t test_svtrn2_u8(svuint8_t op1, svuint8_t op2) {
+  // CHECK-LABEL: test_svtrn2_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.trn2q.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn2q, _u8, , )(op1, op2);
+}
+
+svuint16_t test_svtrn2_u16(svuint16_t op1, svuint16_t op2) {
+  // CHECK-LABEL: test_svtrn2_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.trn2q.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn2q, _u16, , )(op1, op2);
+}
+
+svuint32_t test_svtrn2_u32(svuint32_t op1, svuint32_t op2) {
+  // CHECK-LABEL: test_svtrn2_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.trn2q.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn2q, _u32, , )(op1, op2);
+}
+
+svuint64_t test_svtrn2_u64(svuint64_t op1, svuint64_t op2) {
+  // CHECK-LABEL: test_svtrn2_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.trn2q.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn2q, _u64, , )(op1, op2);
+}
+
+svfloat16_t test_svtrn2_f16(svfloat16_t op1, svfloat16_t op2) {
+  // CHECK-LABEL: test_svtrn2_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.trn2q.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn2q, _f16, , )(op1, op2);
+}
+
+svfloat32_t test_svtrn2_f32(svfloat32_t op1, svfloat32_t op2) {
+  // CHECK-LABEL: test_svtrn2_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.trn2q.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn2q, _f32, , )(op1, op2);
+}
+
+svfloat64_t test_svtrn2_f64(svfloat64_t op1, svfloat64_t op2) {
+  // CHECK-LABEL: test_svtrn2_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.trn2q.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svtrn2q, _f64, , )(op1, op2);
+}
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp1-fp64.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp1-fp64.c
new file mode 100644
index 000000000000..cb96efa7f519
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp1-fp64.c
@@ -0,0 +1,88 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE_MATMUL_FP64 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE_MATMUL_FP64 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svuzp1_s8(svint8_t op1, svint8_t op2) {
+  // CHECK-LABEL: test_svuzp1_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp1q.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp1q, _s8, , )(op1, op2);
+}
+
+svint16_t test_svuzp1_s16(svint16_t op1, svint16_t op2) {
+  // CHECK-LABEL: test_svuzp1_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp1q.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp1q, _s16, , )(op1, op2);
+}
+
+svint32_t test_svuzp1_s32(svint32_t op1, svint32_t op2) {
+  // CHECK-LABEL: test_svuzp1_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1q.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp1q, _s32, , )(op1, op2);
+}
+
+svint64_t test_svuzp1_s64(svint64_t op1, svint64_t op2) {
+  // CHECK-LABEL: test_svuzp1_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uzp1q.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp1q, _s64, , )(op1, op2);
+}
+
+svuint8_t test_svuzp1_u8(svuint8_t op1, svuint8_t op2) {
+  // CHECK-LABEL: test_svuzp1_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp1q.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp1q, _u8, , )(op1, op2);
+}
+
+svuint16_t test_svuzp1_u16(svuint16_t op1, svuint16_t op2) {
+  // CHECK-LABEL: test_svuzp1_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp1q.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp1q, _u16, , )(op1, op2);
+}
+
+svuint32_t test_svuzp1_u32(svuint32_t op1, svuint32_t op2) {
+  // CHECK-LABEL: test_svuzp1_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1q.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp1q, _u32, , )(op1, op2);
+}
+
+svuint64_t test_svuzp1_u64(svuint64_t op1, svuint64_t op2) {
+  // CHECK-LABEL: test_svuzp1_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uzp1q.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp1q, _u64, , )(op1, op2);
+}
+
+svfloat16_t test_svuzp1_f16(svfloat16_t op1, svfloat16_t op2) {
+  // CHECK-LABEL: test_svuzp1_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.uzp1q.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp1q, _f16, , )(op1, op2);
+}
+
+svfloat32_t test_svuzp1_f32(svfloat32_t op1, svfloat32_t op2) {
+  // CHECK-LABEL: test_svuzp1_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.uzp1q.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp1q, _f32, , )(op1, op2);
+}
+
+svfloat64_t test_svuzp1_f64(svfloat64_t op1, svfloat64_t op2) {
+  // CHECK-LABEL: test_svuzp1_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.uzp1q.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp1q, _f64, , )(op1, op2);
+}
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp2-fp64.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp2-fp64.c
new file mode 100644
index 000000000000..122d5c536cc0
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp2-fp64.c
@@ -0,0 +1,88 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE_MATMUL_FP64 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE_MATMUL_FP64 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svuzp2_s8(svint8_t op1, svint8_t op2) {
+  // CHECK-LABEL: test_svuzp2_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp2q.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp2q, _s8, , )(op1, op2);
+}
+
+svint16_t test_svuzp2_s16(svint16_t op1, svint16_t op2) {
+  // CHECK-LABEL: test_svuzp2_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp2q.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp2q, _s16, , )(op1, op2);
+}
+
+svint32_t test_svuzp2_s32(svint32_t op1, svint32_t op2) {
+  // CHECK-LABEL: test_svuzp2_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp2q.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp2q, _s32, , )(op1, op2);
+}
+
+svint64_t test_svuzp2_s64(svint64_t op1, svint64_t op2) {
+  // CHECK-LABEL: test_svuzp2_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uzp2q.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp2q, _s64, , )(op1, op2);
+}
+
+svuint8_t test_svuzp2_u8(svuint8_t op1, svuint8_t op2) {
+  // CHECK-LABEL: test_svuzp2_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp2q.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp2q, _u8, , )(op1, op2);
+}
+
+svuint16_t test_svuzp2_u16(svuint16_t op1, svuint16_t op2) {
+  // CHECK-LABEL: test_svuzp2_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp2q.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp2q, _u16, , )(op1, op2);
+}
+
+svuint32_t test_svuzp2_u32(svuint32_t op1, svuint32_t op2) {
+  // CHECK-LABEL: test_svuzp2_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp2q.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp2q, _u32, , )(op1, op2);
+}
+
+svuint64_t test_svuzp2_u64(svuint64_t op1, svuint64_t op2) {
+  // CHECK-LABEL: test_svuzp2_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uzp2q.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp2q, _u64, , )(op1, op2);
+}
+
+svfloat16_t test_svuzp2_f16(svfloat16_t op1, svfloat16_t op2) {
+  // CHECK-LABEL: test_svuzp2_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.uzp2q.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp2q, _f16, , )(op1, op2);
+}
+
+svfloat32_t test_svuzp2_f32(svfloat32_t op1, svfloat32_t op2) {
+  // CHECK-LABEL: test_svuzp2_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.uzp2q.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp2q, _f32, , )(op1, op2);
+}
+
+svfloat64_t test_svuzp2_f64(svfloat64_t op1, svfloat64_t op2) {
+  // CHECK-LABEL: test_svuzp2_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.uzp2q.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svuzp2q, _f64, , )(op1, op2);
+}
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip1-fp64.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip1-fp64.c
new file mode 100644
index 000000000000..1e9e81dc45bb
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip1-fp64.c
@@ -0,0 +1,88 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE_MATMUL_FP64 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE_MATMUL_FP64 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svzip1_s8(svint8_t op1, svint8_t op2) {
+  // CHECK-LABEL: test_svzip1_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.zip1q.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip1q, _s8, , )(op1, op2);
+}
+
+svint16_t test_svzip1_s16(svint16_t op1, svint16_t op2) {
+  // CHECK-LABEL: test_svzip1_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.zip1q.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip1q, _s16, , )(op1, op2);
+}
+
+svint32_t test_svzip1_s32(svint32_t op1, svint32_t op2) {
+  // CHECK-LABEL: test_svzip1_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.zip1q.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip1q, _s32, , )(op1, op2);
+}
+
+svint64_t test_svzip1_s64(svint64_t op1, svint64_t op2) {
+  // CHECK-LABEL: test_svzip1_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.zip1q.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip1q, _s64, , )(op1, op2);
+}
+
+svuint8_t test_svzip1_u8(svuint8_t op1, svuint8_t op2) {
+  // CHECK-LABEL: test_svzip1_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.zip1q.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip1q, _u8, , )(op1, op2);
+}
+
+svuint16_t test_svzip1_u16(svuint16_t op1, svuint16_t op2) {
+  // CHECK-LABEL: test_svzip1_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.zip1q.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip1q, _u16, , )(op1, op2);
+}
+
+svuint32_t test_svzip1_u32(svuint32_t op1, svuint32_t op2) {
+  // CHECK-LABEL: test_svzip1_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.zip1q.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip1q, _u32, , )(op1, op2);
+}
+
+svuint64_t test_svzip1_u64(svuint64_t op1, svuint64_t op2) {
+  // CHECK-LABEL: test_svzip1_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.zip1q.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip1q, _u64, , )(op1, op2);
+}
+
+svfloat16_t test_svzip1_f16(svfloat16_t op1, svfloat16_t op2) {
+  // CHECK-LABEL: test_svzip1_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.zip1q.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip1q, _f16, , )(op1, op2);
+}
+
+svfloat32_t test_svzip1_f32(svfloat32_t op1, svfloat32_t op2) {
+  // CHECK-LABEL: test_svzip1_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.zip1q.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip1q, _f32, , )(op1, op2);
+}
+
+svfloat64_t test_svzip1_f64(svfloat64_t op1, svfloat64_t op2) {
+  // CHECK-LABEL: test_svzip1_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.zip1q.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip1q, _f64, , )(op1, op2);
+}
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip2-fp64.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip2-fp64.c
new file mode 100644
index 000000000000..ad33f565af89
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip2-fp64.c
@@ -0,0 +1,88 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE_MATMUL_FP64 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE_MATMUL_FP64 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svzip2_s8(svint8_t op1, svint8_t op2) {
+  // CHECK-LABEL: test_svzip2_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.zip2q.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip2q, _s8, , )(op1, op2);
+}
+
+svint16_t test_svzip2_s16(svint16_t op1, svint16_t op2) {
+  // CHECK-LABEL: test_svzip2_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.zip2q.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip2q, _s16, , )(op1, op2);
+}
+
+svint32_t test_svzip2_s32(svint32_t op1, svint32_t op2) {
+  // CHECK-LABEL: test_svzip2_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.zip2q.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip2q, _s32, , )(op1, op2);
+}
+
+svint64_t test_svzip2_s64(svint64_t op1, svint64_t op2) {
+  // CHECK-LABEL: test_svzip2_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.zip2q.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip2q, _s64, , )(op1, op2);
+}
+
+svuint8_t test_svzip2_u8(svuint8_t op1, svuint8_t op2) {
+  // CHECK-LABEL: test_svzip2_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.zip2q.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip2q, _u8, , )(op1, op2);
+}
+
+svuint16_t test_svzip2_u16(svuint16_t op1, svuint16_t op2) {
+  // CHECK-LABEL: test_svzip2_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.zip2q.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip2q, _u16, , )(op1, op2);
+}
+
+svuint32_t test_svzip2_u32(svuint32_t op1, svuint32_t op2) {
+  // CHECK-LABEL: test_svzip2_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.zip2q.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip2q, _u32, , )(op1, op2);
+}
+
+svuint64_t test_svzip2_u64(svuint64_t op1, svuint64_t op2) {
+  // CHECK-LABEL: test_svzip2_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.zip2q.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip2q, _u64, , )(op1, op2);
+}
+
+svfloat16_t test_svzip2_f16(svfloat16_t op1, svfloat16_t op2) {
+  // CHECK-LABEL: test_svzip2_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.zip2q.nxv8f16(<vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip2q, _f16, , )(op1, op2);
+}
+
+svfloat32_t test_svzip2_f32(svfloat32_t op1, svfloat32_t op2) {
+  // CHECK-LABEL: test_svzip2_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.zip2q.nxv4f32(<vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip2q, _f32, , )(op1, op2);
+}
+
+svfloat64_t test_svzip2_f64(svfloat64_t op1, svfloat64_t op2) {
+  // CHECK-LABEL: test_svzip2_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.zip2q.nxv2f64(<vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svzip2q, _f64, , )(op1, op2);
+}
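
For reference, a minimal usage sketch of the new intrinsics (not part of the
patch; the helper name interleave_q and its comments are illustrative, not
taken from the commit). In the SVE_OVERLOADED_FORMS run above,
SVE_ACLE_FUNC(svzip1q, _f64, , ) expands to the overloaded spelling svzip1q,
exercising the svzip1q[_{d}] form; otherwise it expands to svzip1q_f64. User
code chooses between the same two spellings:

#include <arm_sve.h>

#if defined(__ARM_FEATURE_SVE_MATMUL_FP64)
/* Sketch only: assumes a toolchain that defines the guard macro when the
   FP64 matrix-multiply extension is enabled. interleave_q is a hypothetical
   helper, not part of this commit. */
svfloat64_t interleave_q(svfloat64_t a, svfloat64_t b) {
  /* Overloaded form; the _f64 variant is selected from the argument types.
     Interleaves quadwords (128-bit blocks) from the low halves of a and b. */
  svfloat64_t lo = svzip1q(a, b);
  /* Explicitly typed form, equivalent for f64 operands; takes quadwords
     from the high halves. */
  svfloat64_t hi = svzip2q_f64(a, b);
  /* svtrn1q/svtrn2q and svuzp1q/svuzp2q follow the same two spellings. */
  return svtrn1q(lo, hi);
}
#endif

With the feature enabled, each call should lower through the
llvm.aarch64.sve.*q intrinsics checked above to the 128-bit element forms of
ZIP1/ZIP2/TRN1/TRN2/UZP1/UZP2 (e.g. zip1 z0.q, z0.q, z1.q).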