fpetrogalli created this revision.
fpetrogalli added reviewers: kmclaughlin, efriedma, ctetreau, sdesmalen, david-arm.
Herald added subscribers: llvm-commits, cfe-commits, psnobl, rkruppe, hiraditya, tschuett.
Herald added projects: clang, LLVM.
Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D82501

Files:
  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret-bfloat.c
  clang/utils/TableGen/SveEmitter.cpp
  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
  llvm/test/CodeGen/AArch64/sve-bitcast-bfloat.ll
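For reference, the SveEmitter.cpp change below makes the generated arm_sve.h wrap every bf16 reinterpret in the ACLE feature macro. A sketch of one emitted stanza for the non-overloaded form, reconstructed from the emitter code in this patch (illustrative only, not copied from the generated header):

  #if defined(__ARM_FEATURE_SVE_BF16)
  #define svreinterpret_s8_bf16(...) __builtin_sve_reinterpret_s8_bf16(__VA_ARGS__)
  #endif /* #if defined(__ARM_FEATURE_SVE_BF16) */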
Index: llvm/test/CodeGen/AArch64/sve-bitcast-bfloat.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-bitcast-bfloat.ll
@@ -0,0 +1,119 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+bf16 < %s 2>%t | FileCheck %s
+; RUN: not --crash llc -mtriple=aarch64_be -mattr=+sve,+bf16 < %s
+; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
+
+; WARN-NOT: warning
+
+define <vscale x 16 x i8> @bitcast_bfloat_to_i8(<vscale x 8 x bfloat> %v) {
+; CHECK-LABEL: bitcast_bfloat_to_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %bc
+}
+
+define <vscale x 8 x i16> @bitcast_bfloat_to_i16(<vscale x 8 x bfloat> %v) {
+; CHECK-LABEL: bitcast_bfloat_to_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %bc
+}
+
+define <vscale x 4 x i32> @bitcast_bfloat_to_i32(<vscale x 8 x bfloat> %v) {
+; CHECK-LABEL: bitcast_bfloat_to_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %bc
+}
+
+define <vscale x 2 x i64> @bitcast_bfloat_to_i64(<vscale x 8 x bfloat> %v) {
+; CHECK-LABEL: bitcast_bfloat_to_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %bc
+}
+
+define <vscale x 8 x half> @bitcast_bfloat_to_half(<vscale x 8 x bfloat> %v) {
+; CHECK-LABEL: bitcast_bfloat_to_half:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 8 x half>
+  ret <vscale x 8 x half> %bc
+}
+
+define <vscale x 4 x float> @bitcast_bfloat_to_float(<vscale x 8 x bfloat> %v) {
+; CHECK-LABEL: bitcast_bfloat_to_float:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 4 x float>
+  ret <vscale x 4 x float> %bc
+}
+
+define <vscale x 2 x double> @bitcast_bfloat_to_double(<vscale x 8 x bfloat> %v) {
+; CHECK-LABEL: bitcast_bfloat_to_double:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 2 x double>
+  ret <vscale x 2 x double> %bc
+}
+
+define <vscale x 8 x bfloat> @bitcast_i8_to_bfloat(<vscale x 16 x i8> %v) {
+; CHECK-LABEL: bitcast_i8_to_bfloat:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 16 x i8> %v to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %bc
+}
+
+define <vscale x 8 x bfloat> @bitcast_i16_to_bfloat(<vscale x 8 x i16> %v) {
+; CHECK-LABEL: bitcast_i16_to_bfloat:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 8 x i16> %v to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %bc
+}
+
+define <vscale x 8 x bfloat> @bitcast_i32_to_bfloat(<vscale x 4 x i32> %v) {
+; CHECK-LABEL: bitcast_i32_to_bfloat:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 4 x i32> %v to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %bc
+}
+
+define <vscale x 8 x bfloat> @bitcast_i64_to_bfloat(<vscale x 2 x i64> %v) {
+; CHECK-LABEL: bitcast_i64_to_bfloat:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 2 x i64> %v to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %bc
+}
+
+define <vscale x 8 x bfloat> @bitcast_half_to_bfloat(<vscale x 8 x half> %v) {
+; CHECK-LABEL: bitcast_half_to_bfloat:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 8 x half> %v to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %bc
+}
+
+define <vscale x 8 x bfloat> @bitcast_float_to_bfloat(<vscale x 4 x float> %v) {
+; CHECK-LABEL: bitcast_float_to_bfloat:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 4 x float> %v to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %bc
+}
+
+define <vscale x 8 x bfloat> @bitcast_double_to_bfloat(<vscale x 2 x double> %v) {
+; CHECK-LABEL: bitcast_double_to_bfloat:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ret
+  %bc = bitcast <vscale x 2 x double> %v to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %bc
+}
+
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1435,7 +1435,6 @@
  def : Pat<(nxv8f16 (bitconvert (nxv16i8 ZPR:$src))), (nxv8f16 ZPR:$src)>;
  def : Pat<(nxv8f16 (bitconvert (nxv8i16 ZPR:$src))), (nxv8f16 ZPR:$src)>;
-  def : Pat<(nxv8bf16 (bitconvert (nxv8i16 ZPR:$src))), (nxv8bf16 ZPR:$src)>;
  def : Pat<(nxv8f16 (bitconvert (nxv4i32 ZPR:$src))), (nxv8f16 ZPR:$src)>;
  def : Pat<(nxv8f16 (bitconvert (nxv2i64 ZPR:$src))), (nxv8f16 ZPR:$src)>;
  def : Pat<(nxv8f16 (bitconvert (nxv4f32 ZPR:$src))), (nxv8f16 ZPR:$src)>;
@@ -1456,6 +1455,24 @@
  def : Pat<(nxv2f64 (bitconvert (nxv4f32 ZPR:$src))), (nxv2f64 ZPR:$src)>;
  }

+  let Predicates = [IsLE, HasSVE, HasBF16] in {
+  def : Pat<(nxv8bf16 (bitconvert (nxv16i8 ZPR:$src))), (nxv8bf16 ZPR:$src)>;
+  def : Pat<(nxv8bf16 (bitconvert (nxv8i16 ZPR:$src))), (nxv8bf16 ZPR:$src)>;
+  def : Pat<(nxv8bf16 (bitconvert (nxv4i32 ZPR:$src))), (nxv8bf16 ZPR:$src)>;
+  def : Pat<(nxv8bf16 (bitconvert (nxv2i64 ZPR:$src))), (nxv8bf16 ZPR:$src)>;
+  def : Pat<(nxv8bf16 (bitconvert (nxv8f16 ZPR:$src))), (nxv8bf16 ZPR:$src)>;
+  def : Pat<(nxv8bf16 (bitconvert (nxv4f32 ZPR:$src))), (nxv8bf16 ZPR:$src)>;
+  def : Pat<(nxv8bf16 (bitconvert (nxv2f64 ZPR:$src))), (nxv8bf16 ZPR:$src)>;
+
+  def : Pat<(nxv16i8 (bitconvert (nxv8bf16 ZPR:$src))), (nxv16i8 ZPR:$src)>;
+  def : Pat<(nxv8i16 (bitconvert (nxv8bf16 ZPR:$src))), (nxv8i16 ZPR:$src)>;
+  def : Pat<(nxv4i32 (bitconvert (nxv8bf16 ZPR:$src))), (nxv4i32 ZPR:$src)>;
+  def : Pat<(nxv2i64 (bitconvert (nxv8bf16 ZPR:$src))), (nxv2i64 ZPR:$src)>;
+  def : Pat<(nxv8f16 (bitconvert (nxv8bf16 ZPR:$src))), (nxv8f16 ZPR:$src)>;
+  def : Pat<(nxv4f32 (bitconvert (nxv8bf16 ZPR:$src))), (nxv4f32 ZPR:$src)>;
+  def : Pat<(nxv2f64 (bitconvert (nxv8bf16 ZPR:$src))), (nxv2f64 ZPR:$src)>;
+  }
+
  def : Pat<(nxv16i1 (reinterpret_cast (nxv16i1 PPR:$src))), (COPY_TO_REGCLASS PPR:$src, PPR)>;
  def : Pat<(nxv16i1 (reinterpret_cast (nxv8i1 PPR:$src))), (COPY_TO_REGCLASS PPR:$src, PPR)>;
  def : Pat<(nxv16i1 (reinterpret_cast (nxv4i1 PPR:$src))), (COPY_TO_REGCLASS PPR:$src, PPR)>;
Index: clang/utils/TableGen/SveEmitter.cpp
===================================================================
--- clang/utils/TableGen/SveEmitter.cpp
+++ clang/utils/TableGen/SveEmitter.cpp
@@ -248,13 +248,13 @@
    const char *Type;
    const char *BuiltinType;
  };
-  SmallVector<ReinterpretTypeInfo, 11> Reinterprets = {
+  SmallVector<ReinterpretTypeInfo, 12> Reinterprets = {
      {"s8", "svint8_t", "q16Sc"}, {"s16", "svint16_t", "q8Ss"},
      {"s32", "svint32_t", "q4Si"}, {"s64", "svint64_t", "q2SWi"},
      {"u8", "svuint8_t", "q16Uc"}, {"u16", "svuint16_t", "q8Us"},
      {"u32", "svuint32_t", "q4Ui"}, {"u64", "svuint64_t", "q2UWi"},
-      {"f16", "svfloat16_t", "q8h"}, {"f32", "svfloat32_t", "q4f"},
-      {"f64", "svfloat64_t", "q2d"}};
+      {"f16", "svfloat16_t", "q8h"}, {"bf16", "svbfloat16_t", "q8y"},
+      {"f32", "svfloat32_t", "q4f"}, {"f64", "svfloat64_t", "q2d"}};

  RecordKeeper &Records;
  llvm::StringMap<uint64_t> EltTypes;
@@ -1208,6 +1208,10 @@
  for (auto ShortForm : { false, true } )
    for (const ReinterpretTypeInfo &From : Reinterprets)
      for (const ReinterpretTypeInfo &To : Reinterprets) {
+        const bool IsBFloat = StringRef(From.Suffix).equals("bf16") ||
+                              StringRef(To.Suffix).equals("bf16");
+        if (IsBFloat)
+          OS << "#if defined(__ARM_FEATURE_SVE_BF16)\n";
        if (ShortForm) {
          OS << "__aio " << From.Type << " svreinterpret_" << From.Suffix;
          OS << "(" << To.Type << " op) {\n";
@@ -1218,6 +1222,8 @@
          OS << "#define svreinterpret_" << From.Suffix << "_" << To.Suffix
             << "(...) __builtin_sve_reinterpret_" << From.Suffix << "_" << To.Suffix
             << "(__VA_ARGS__)\n";
+        if (IsBFloat)
+          OS << "#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */\n";
      }

  SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret-bfloat.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret-bfloat.c
@@ -0,0 +1,171 @@
+// REQUIRES: aarch64-registered-target
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_BF16 -D__ARM_FEATURE_BF16_SCALAR_ARITHMETIC -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_BF16 -D__ARM_FEATURE_BF16_SCALAR_ARITHMETIC -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svreinterpret_s8_bf16(svbfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_s8_bf16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 16 x i8>
+  // CHECK: ret <vscale x 16 x i8> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_s8, _bf16, , )(op);
+}
+
+svint16_t test_svreinterpret_s16_bf16(svbfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_s16_bf16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 8 x i16>
+  // CHECK: ret <vscale x 8 x i16> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_s16, _bf16, , )(op);
+}
+
+svint32_t test_svreinterpret_s32_bf16(svbfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_s32_bf16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 4 x i32>
+  // CHECK: ret <vscale x 4 x i32> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_s32, _bf16, , )(op);
+}
+svint64_t test_svreinterpret_s64_bf16(svbfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_s64_bf16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 2 x i64>
+  // CHECK: ret <vscale x 2 x i64> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_s64, _bf16, , )(op);
+}
+
+svuint8_t test_svreinterpret_u8_bf16(svbfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_u8_bf16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 16 x i8>
+  // CHECK: ret <vscale x 16 x i8> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_u8, _bf16, , )(op);
+}
+
+svuint16_t test_svreinterpret_u16_bf16(svbfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_u16_bf16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 8 x i16>
+  // CHECK: ret <vscale x 8 x i16> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_u16, _bf16, , )(op);
+}
+
+svuint32_t test_svreinterpret_u32_bf16(svbfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_u32_bf16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 4 x i32>
+  // CHECK: ret <vscale x 4 x i32> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_u32, _bf16, , )(op);
+}
+
+svuint64_t test_svreinterpret_u64_bf16(svbfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_u64_bf16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 2 x i64>
+  // CHECK: ret <vscale x 2 x i64> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_u64, _bf16, , )(op);
+}
+
+svbfloat16_t test_svreinterpret_bf16_s8(svint8_t op) {
+  // CHECK-LABEL: test_svreinterpret_bf16_s8
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 16 x i8> %op to <vscale x 8 x bfloat>
+  // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_bf16, _s8, , )(op);
+}
+
+svbfloat16_t test_svreinterpret_bf16_s16(svint16_t op) {
+  // CHECK-LABEL: test_svreinterpret_bf16_s16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x i16> %op to <vscale x 8 x bfloat>
+  // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_bf16, _s16, , )(op);
+}
+
+svbfloat16_t test_svreinterpret_bf16_s32(svint32_t op) {
+  // CHECK-LABEL: test_svreinterpret_bf16_s32
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 4 x i32> %op to <vscale x 8 x bfloat>
+  // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_bf16, _s32, , )(op);
+}
+
+svbfloat16_t test_svreinterpret_bf16_s64(svint64_t op) {
+  // CHECK-LABEL: test_svreinterpret_bf16_s64
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 2 x i64> %op to <vscale x 8 x bfloat>
+  // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_bf16, _s64, , )(op);
+}
+
+svbfloat16_t test_svreinterpret_bf16_u8(svuint8_t op) {
+  // CHECK-LABEL: test_svreinterpret_bf16_u8
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 16 x i8> %op to <vscale x 8 x bfloat>
+  // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_bf16, _u8, , )(op);
+}
+
+svbfloat16_t test_svreinterpret_bf16_u16(svuint16_t op) {
+  // CHECK-LABEL: test_svreinterpret_bf16_u16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x i16> %op to <vscale x 8 x bfloat>
+  // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_bf16, _u16, , )(op);
+}
+
+svbfloat16_t test_svreinterpret_bf16_u32(svuint32_t op) {
+  // CHECK-LABEL: test_svreinterpret_bf16_u32
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 4 x i32> %op to <vscale x 8 x bfloat>
+  // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_bf16, _u32, , )(op);
+}
+
+svbfloat16_t test_svreinterpret_bf16_u64(svuint64_t op) {
+  // CHECK-LABEL: test_svreinterpret_bf16_u64
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 2 x i64> %op to <vscale x 8 x bfloat>
+  // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_bf16, _u64, , )(op);
+}
+
+svbfloat16_t test_svreinterpret_bf16_bf16(svbfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_bf16_bf16
+  // CHECK: ret <vscale x 8 x bfloat> %op
+  return SVE_ACLE_FUNC(svreinterpret_bf16, _bf16, , )(op);
+}
+
+svbfloat16_t test_svreinterpret_bf16_f16(svfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_bf16_f16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x half> %op to <vscale x 8 x bfloat>
+  // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_bf16, _f16, , )(op);
+}
+
+svbfloat16_t test_svreinterpret_bf16_f32(svfloat32_t op) {
+  // CHECK-LABEL: test_svreinterpret_bf16_f32
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 4 x float> %op to <vscale x 8 x bfloat>
+  // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_bf16, _f32, , )(op);
+}
+
+svbfloat16_t test_svreinterpret_bf16_f64(svfloat64_t op) {
+  // CHECK-LABEL: test_svreinterpret_bf16_f64
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 2 x double> %op to <vscale x 8 x bfloat>
+  // CHECK: ret <vscale x 8 x bfloat> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_bf16, _f64, , )(op);
+}
+
+svfloat32_t test_svreinterpret_f32_bf16(svbfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_f32_bf16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 4 x float>
+  // CHECK: ret <vscale x 4 x float> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_f32, _bf16, , )(op);
+}
+
+svfloat16_t test_svreinterpret_f16_bf16(svbfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_f16_bf16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 8 x half>
+  // CHECK: ret <vscale x 8 x half> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_f16, _bf16, , )(op);
+}
+
+svfloat64_t test_svreinterpret_f64_bf16(svbfloat16_t op) {
+  // CHECK-LABEL: test_svreinterpret_f64_bf16
+  // CHECK: %[[CAST:.*]] = bitcast <vscale x 8 x bfloat> %op to <vscale x 2 x double>
+  // CHECK: ret <vscale x 2 x double> %[[CAST]]
+  return SVE_ACLE_FUNC(svreinterpret_f64, _bf16, , )(op);
+}
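Not part of the patch, but for context, a minimal usage sketch of the reinterprets this enables, assuming a compiler with SVE and BF16 support enabled (for example -march=armv8.6-a+sve+bf16):

  #include <arm_sve.h>

  // Reinterpret the raw 16-bit lane encodings of a BF16 vector. No data is
  // converted; only the element type changes.
  svuint16_t bf16_raw_bits(svbfloat16_t v) {
    return svreinterpret_u16_bf16(v);
  }

  // And back again: rebuild a BF16 vector from raw 16-bit lane values.
  svbfloat16_t bf16_from_raw_bits(svuint16_t bits) {
    return svreinterpret_bf16_u16(bits);
  }

With the new isel patterns these casts are pure register reinterpretations, so both helpers should lower to a plain ret, as the llc test above checks.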