Author: Hsiangkai Wang
Date: 2020-12-20T17:39:20+08:00
New Revision: 41ab45d6624602ba10486f044c0fd06db5b9bedb

URL: https://github.com/llvm/llvm-project/commit/41ab45d6624602ba10486f044c0fd06db5b9bedb
DIFF: https://github.com/llvm/llvm-project/commit/41ab45d6624602ba10486f044c0fd06db5b9bedb.diff

LOG: [RISCV] Define vector vfwmul intrinsics.

Define vector vfwmul intrinsics and lower them to V instructions.

We worked with @rogfer01 from BSC to develop this patch.

Authored-by: Roger Ferrer Ibanez <rofir...@gmail.com>
Co-Authored-by: Hsiangkai Wang <kai.w...@sifive.com>

Differential Revision: https://reviews.llvm.org/D93584

Added: 
    llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 
    

################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 015585780e58..df289d9714f7 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -423,6 +423,8 @@ let TargetPrefix = "riscv" in {
   defm vfdiv : RISCVBinaryAAX;
   defm vfrdiv : RISCVBinaryAAX;
 
+  defm vfwmul : RISCVBinaryABX;
+
   defm vfsgnj : RISCVBinaryAAX;
   defm vfsgnjn : RISCVBinaryAAX;
   defm vfsgnjx : RISCVBinaryAAX;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 150cf58b0339..52c4211a5855 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1543,6 +1543,11 @@ defm PseudoVFMUL : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
 defm PseudoVFDIV : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
 defm PseudoVFRDIV : VPseudoBinaryV_VX</*IsFloat=*/1>;
 
+//===----------------------------------------------------------------------===//
+// 14.5. Vector Widening Floating-Point Multiply
+//===----------------------------------------------------------------------===//
+defm PseudoVFWMUL : VPseudoBinaryW_VV_VX</*IsFloat=*/1>;
+
 //===----------------------------------------------------------------------===//
 // 14.12. Vector Floating-Point Sign-Injection Instructions
 //===----------------------------------------------------------------------===//
@@ -1829,6 +1834,11 @@ defm "" : VPatBinaryV_VV_VX<"int_riscv_vfmul", "PseudoVFMUL", AllFloatVectors>;
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vfdiv", "PseudoVFDIV", AllFloatVectors>;
 defm "" : VPatBinaryV_VX<"int_riscv_vfrdiv", "PseudoVFRDIV", AllFloatVectors>;
 
+//===----------------------------------------------------------------------===//
+// 14.5. Vector Widening Floating-Point Multiply
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwmul", "PseudoVFWMUL", AllWidenableFloatVectors>;
+
 //===----------------------------------------------------------------------===//
 // 14.12.
Vector Floating-Point Sign-Injection Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll new file mode 100644 index 000000000000..80448534d1c1 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll @@ -0,0 +1,401 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16( + <vscale x 1 x half>, + <vscale x 1 x half>, + i32); + +define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16( + <vscale x 1 x half> %0, + <vscale x 1 x half> %1, + i32 %2) + + ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16( + <vscale x 1 x float>, + <vscale x 1 x half>, + <vscale x 1 x half>, + <vscale x 1 x i1>, + i32); + +define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16( + <vscale x 1 x float> %0, + <vscale x 1 x half> %1, + <vscale x 1 x half> %2, + <vscale x 1 x i1> %3, + i32 %4) + + ret <vscale x 1 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16( + <vscale x 2 x half>, + <vscale x 2 x half>, + i32); + +define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16( + <vscale x 2 x half> %0, + <vscale x 2 x half> %1, + i32 %2) + + ret <vscale x 2 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16( + <vscale x 2 x float>, + <vscale x 2 x half>, + <vscale x 2 x half>, + <vscale x 2 x i1>, + i32); + +define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16( + <vscale x 2 x float> %0, + <vscale x 2 x half> %1, + <vscale x 2 x half> %2, + <vscale x 2 x i1> %3, + i32 %4) + + ret <vscale x 2 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16( + <vscale x 4 x half>, + <vscale x 4 x half>, + i32); + +define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16( + <vscale x 4 x half> %0, + <vscale x 4 x half> %1, + i32 %2) + + ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16( + <vscale x 4 x float>, + <vscale x 4 x half>, + <vscale x 4 x half>, + <vscale x 4 x i1>, + i32); + +define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16( + <vscale x 4 x float> %0, + <vscale x 4 x half> %1, + <vscale x 4 x half> %2, + <vscale x 4 x i1> %3, + i32 %4) + + ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16( + <vscale x 8 x half>, + <vscale x 8 x half>, + i32); + +define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16( + <vscale x 8 x half> %0, + <vscale x 8 x half> %1, + i32 %2) + + ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16( + <vscale x 8 x float>, + <vscale x 8 x half>, + <vscale x 8 x half>, + <vscale x 8 x i1>, + i32); + +define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16( + <vscale x 8 x float> %0, + <vscale x 8 x half> %1, + <vscale x 8 x half> %2, + <vscale x 8 x i1> %3, + i32 %4) + + ret <vscale x 8 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16( + <vscale x 16 x half>, + <vscale x 16 x half>, + i32); + +define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16( + <vscale x 16 x half> %0, + <vscale x 16 x half> %1, + i32 %2) + + ret <vscale x 16 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16( + <vscale x 16 x float>, + <vscale x 16 x half>, + <vscale x 16 x half>, + <vscale x 16 x i1>, + i32); + +define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16( + <vscale x 16 x float> %0, + <vscale x 16 x half> %1, + <vscale x 16 x half> %2, + <vscale x 
16 x i1> %3, + i32 %4) + + ret <vscale x 16 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16.f16( + <vscale x 1 x half>, + half, + i32); + +define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16.f16( + <vscale x 1 x half> %0, + half %1, + i32 %2) + + ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16.f16( + <vscale x 1 x float>, + <vscale x 1 x half>, + half, + <vscale x 1 x i1>, + i32); + +define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16.f16( + <vscale x 1 x float> %0, + <vscale x 1 x half> %1, + half %2, + <vscale x 1 x i1> %3, + i32 %4) + + ret <vscale x 1 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16.f16( + <vscale x 2 x half>, + half, + i32); + +define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16.f16( + <vscale x 2 x half> %0, + half %1, + i32 %2) + + ret <vscale x 2 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16.f16( + <vscale x 2 x float>, + <vscale x 2 x half>, + half, + <vscale x 2 x i1>, + i32); + +define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16.f16( + <vscale x 2 x float> %0, + <vscale x 2 x half> %1, + half %2, + <vscale x 2 x i1> %3, + i32 %4) + + ret <vscale x 2 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16.f16( + <vscale x 4 x half>, + half, + i32); + +define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16.f16( + <vscale x 4 x half> %0, + half %1, + i32 %2) + + ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16.f16( + <vscale x 4 x float>, + <vscale x 4 x half>, + half, + <vscale x 4 x i1>, + i32); + +define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, 
{{ft[0-9]+}}, v0.t + %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16.f16( + <vscale x 4 x float> %0, + <vscale x 4 x half> %1, + half %2, + <vscale x 4 x i1> %3, + i32 %4) + + ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16.f16( + <vscale x 8 x half>, + half, + i32); + +define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16.f16( + <vscale x 8 x half> %0, + half %1, + i32 %2) + + ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16.f16( + <vscale x 8 x float>, + <vscale x 8 x half>, + half, + <vscale x 8 x i1>, + i32); + +define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16.f16( + <vscale x 8 x float> %0, + <vscale x 8 x half> %1, + half %2, + <vscale x 8 x i1> %3, + i32 %4) + + ret <vscale x 8 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16.f16( + <vscale x 16 x half>, + half, + i32); + +define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16.f16( + <vscale x 16 x half> %0, + half %1, + i32 %2) + + ret <vscale x 16 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16.f16( + <vscale x 16 x float>, + <vscale x 16 x half>, + half, + <vscale x 16 x i1>, + i32); + +define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16.f16( + <vscale x 16 x float> %0, + <vscale x 16 x half> %1, + half %2, + <vscale x 16 x i1> %3, + i32 %4) + + ret <vscale x 16 x float> %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll new file mode 100644 index 000000000000..82f5b0fa957e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll @@ -0,0 +1,721 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16( + <vscale x 1 x half>, + <vscale x 1 x half>, + i64); + +define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 1 x float> 
@llvm.riscv.vfwmul.nxv1f16( + <vscale x 1 x half> %0, + <vscale x 1 x half> %1, + i64 %2) + + ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16( + <vscale x 1 x float>, + <vscale x 1 x half>, + <vscale x 1 x half>, + <vscale x 1 x i1>, + i64); + +define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16( + <vscale x 1 x float> %0, + <vscale x 1 x half> %1, + <vscale x 1 x half> %2, + <vscale x 1 x i1> %3, + i64 %4) + + ret <vscale x 1 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16( + <vscale x 2 x half>, + <vscale x 2 x half>, + i64); + +define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16( + <vscale x 2 x half> %0, + <vscale x 2 x half> %1, + i64 %2) + + ret <vscale x 2 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16( + <vscale x 2 x float>, + <vscale x 2 x half>, + <vscale x 2 x half>, + <vscale x 2 x i1>, + i64); + +define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16( + <vscale x 2 x float> %0, + <vscale x 2 x half> %1, + <vscale x 2 x half> %2, + <vscale x 2 x i1> %3, + i64 %4) + + ret <vscale x 2 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16( + <vscale x 4 x half>, + <vscale x 4 x half>, + i64); + +define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16( + <vscale x 4 x half> %0, + <vscale x 4 x half> %1, + i64 %2) + + ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16( + <vscale x 4 x float>, + <vscale x 4 x half>, + <vscale x 4 x half>, + <vscale x 4 x i1>, + i64); + +define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16( + <vscale x 4 x float> %0, + <vscale x 4 x half> %1, + <vscale x 4 x half> %2, + <vscale x 4 x i1> %3, + i64 %4) + + ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> 
@llvm.riscv.vfwmul.nxv8f16( + <vscale x 8 x half>, + <vscale x 8 x half>, + i64); + +define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16( + <vscale x 8 x half> %0, + <vscale x 8 x half> %1, + i64 %2) + + ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16( + <vscale x 8 x float>, + <vscale x 8 x half>, + <vscale x 8 x half>, + <vscale x 8 x i1>, + i64); + +define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16( + <vscale x 8 x float> %0, + <vscale x 8 x half> %1, + <vscale x 8 x half> %2, + <vscale x 8 x i1> %3, + i64 %4) + + ret <vscale x 8 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16( + <vscale x 16 x half>, + <vscale x 16 x half>, + i64); + +define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16( + <vscale x 16 x half> %0, + <vscale x 16 x half> %1, + i64 %2) + + ret <vscale x 16 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16( + <vscale x 16 x float>, + <vscale x 16 x half>, + <vscale x 16 x half>, + <vscale x 16 x i1>, + i64); + +define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16( + <vscale x 16 x float> %0, + <vscale x 16 x half> %1, + <vscale x 16 x half> %2, + <vscale x 16 x i1> %3, + i64 %4) + + ret <vscale x 16 x float> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f32( + <vscale x 1 x float>, + <vscale x 1 x float>, + i64); + +define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f32( + <vscale x 1 x float> %0, + <vscale x 1 x float> %1, + i64 %2) + + ret <vscale x 1 x double> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f32( + <vscale x 1 x double>, + <vscale x 1 x float>, + <vscale x 1 x float>, + <vscale x 1 x i1>, + i64); + +define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> 
%2, <vscale x 1 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f32( + <vscale x 1 x double> %0, + <vscale x 1 x float> %1, + <vscale x 1 x float> %2, + <vscale x 1 x i1> %3, + i64 %4) + + ret <vscale x 1 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f32( + <vscale x 2 x float>, + <vscale x 2 x float>, + i64); + +define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f32( + <vscale x 2 x float> %0, + <vscale x 2 x float> %1, + i64 %2) + + ret <vscale x 2 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f32( + <vscale x 2 x double>, + <vscale x 2 x float>, + <vscale x 2 x float>, + <vscale x 2 x i1>, + i64); + +define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f32( + <vscale x 2 x double> %0, + <vscale x 2 x float> %1, + <vscale x 2 x float> %2, + <vscale x 2 x i1> %3, + i64 %4) + + ret <vscale x 2 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f32( + <vscale x 4 x float>, + <vscale x 4 x float>, + i64); + +define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f32( + <vscale x 4 x float> %0, + <vscale x 4 x float> %1, + i64 %2) + + ret <vscale x 4 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f32( + <vscale x 4 x double>, + <vscale x 4 x float>, + <vscale x 4 x float>, + <vscale x 4 x i1>, + i64); + +define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f32( + <vscale x 4 x double> %0, + <vscale x 4 x float> %1, + <vscale x 4 x float> %2, + <vscale x 4 x i1> %3, + i64 %4) + + ret <vscale x 4 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f32( + <vscale x 8 x float>, + <vscale x 8 x float>, + i64); + +define <vscale x 8 x double> @intrinsic_vfwmul_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}} + %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f32( + <vscale x 8 x float> %0, + <vscale x 8 x float> %1, + i64 %2) + + ret <vscale x 8 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f32( + <vscale x 8 x double>, + <vscale x 8 x float>, + <vscale x 8 x float>, + <vscale x 8 x i1>, + i64); + +define <vscale x 8 x double> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f32( + <vscale x 8 x double> %0, + <vscale x 8 x float> %1, + <vscale x 8 x float> %2, + <vscale x 8 x i1> %3, + i64 %4) + + ret <vscale x 8 x double> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16.f16( + <vscale x 1 x half>, + half, + i64); + +define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16.f16( + <vscale x 1 x half> %0, + half %1, + i64 %2) + + ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16.f16( + <vscale x 1 x float>, + <vscale x 1 x half>, + half, + <vscale x 1 x i1>, + i64); + +define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16.f16( + <vscale x 1 x float> %0, + <vscale x 1 x half> %1, + half %2, + <vscale x 1 x i1> %3, + i64 %4) + + ret <vscale x 1 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16.f16( + <vscale x 2 x half>, + half, + i64); + +define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16.f16( + <vscale x 2 x half> %0, + half %1, + i64 %2) + + ret <vscale x 2 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16.f16( + <vscale x 2 x float>, + <vscale x 2 x half>, + half, + <vscale x 2 x i1>, + i64); + +define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16.f16( + <vscale x 2 x float> %0, + <vscale x 2 x half> %1, + half %2, + <vscale x 2 x i1> %3, + i64 %4) + + ret <vscale x 2 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16.f16( + <vscale x 4 x half>, + half, + i64); + +define <vscale x 4 x float> 
@intrinsic_vfwmul_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16.f16( + <vscale x 4 x half> %0, + half %1, + i64 %2) + + ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16.f16( + <vscale x 4 x float>, + <vscale x 4 x half>, + half, + <vscale x 4 x i1>, + i64); + +define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16.f16( + <vscale x 4 x float> %0, + <vscale x 4 x half> %1, + half %2, + <vscale x 4 x i1> %3, + i64 %4) + + ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16.f16( + <vscale x 8 x half>, + half, + i64); + +define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16.f16( + <vscale x 8 x half> %0, + half %1, + i64 %2) + + ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16.f16( + <vscale x 8 x float>, + <vscale x 8 x half>, + half, + <vscale x 8 x i1>, + i64); + +define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16.f16( + <vscale x 8 x float> %0, + <vscale x 8 x half> %1, + half %2, + <vscale x 8 x i1> %3, + i64 %4) + + ret <vscale x 8 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16.f16( + <vscale x 16 x half>, + half, + i64); + +define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16.f16( + <vscale x 16 x half> %0, + half %1, + i64 %2) + + ret <vscale x 16 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16.f16( + <vscale x 16 x float>, + <vscale x 16 x half>, + half, + <vscale x 16 x i1>, + i64); + +define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16.f16( + <vscale x 16 x float> %0, + <vscale x 16 x half> %1, + half %2, + <vscale x 16 x i1> %3, + 
i64 %4) + + ret <vscale x 16 x float> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f32.f32( + <vscale x 1 x float>, + float, + i64); + +define <vscale x 1 x double> @intrinsic_vfwmul_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f32.f32( + <vscale x 1 x float> %0, + float %1, + i64 %2) + + ret <vscale x 1 x double> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f32.f32( + <vscale x 1 x double>, + <vscale x 1 x float>, + float, + <vscale x 1 x i1>, + i64); + +define <vscale x 1 x double> @intrinsic_vfwmul_mask_vf_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f32.f32( + <vscale x 1 x double> %0, + <vscale x 1 x float> %1, + float %2, + <vscale x 1 x i1> %3, + i64 %4) + + ret <vscale x 1 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f32.f32( + <vscale x 2 x float>, + float, + i64); + +define <vscale x 2 x double> @intrinsic_vfwmul_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f32.f32( + <vscale x 2 x float> %0, + float %1, + i64 %2) + + ret <vscale x 2 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f32.f32( + <vscale x 2 x double>, + <vscale x 2 x float>, + float, + <vscale x 2 x i1>, + i64); + +define <vscale x 2 x double> @intrinsic_vfwmul_mask_vf_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f32.f32( + <vscale x 2 x double> %0, + <vscale x 2 x float> %1, + float %2, + <vscale x 2 x i1> %3, + i64 %4) + + ret <vscale x 2 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f32.f32( + <vscale x 4 x float>, + float, + i64); + +define <vscale x 4 x double> @intrinsic_vfwmul_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f32.f32( + <vscale x 4 x float> %0, + float %1, + i64 %2) + + ret <vscale x 4 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f32.f32( + <vscale x 4 x double>, + <vscale x 4 x float>, + float, + <vscale x 4 x i1>, + i64); + +define <vscale x 4 x double> @intrinsic_vfwmul_mask_vf_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; 
CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f32.f32( + <vscale x 4 x double> %0, + <vscale x 4 x float> %1, + float %2, + <vscale x 4 x i1> %3, + i64 %4) + + ret <vscale x 4 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f32.f32( + <vscale x 8 x float>, + float, + i64); + +define <vscale x 8 x double> @intrinsic_vfwmul_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f32.f32( + <vscale x 8 x float> %0, + float %1, + i64 %2) + + ret <vscale x 8 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f32.f32( + <vscale x 8 x double>, + <vscale x 8 x float>, + float, + <vscale x 8 x i1>, + i64); + +define <vscale x 8 x double> @intrinsic_vfwmul_mask_vf_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f32.f32( + <vscale x 8 x double> %0, + <vscale x 8 x float> %1, + float %2, + <vscale x 8 x i1> %3, + i64 %4) + + ret <vscale x 8 x double> %a +} _______________________________________________ llvm-branch-commits mailing list llvm-branch-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
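
The tests above pin down the expected lowering: a vsetvli that configures SEW/LMUL for the narrower source type, followed by vfwmul.vv (vector-vector) or vfwmul.vf (vector-scalar). As an illustration only, and not part of the commit itself, the following minimal self-contained IR sketch shows how the RV64 vector-vector form is invoked; the function name @example_vfwmul is invented for the example, while the intrinsic declaration and the expected instructions are taken verbatim from vfwmul-rv64.ll above.

; Illustrative sketch (assumed example, mirrors vfwmul-rv64.ll): widening
; multiply of two nxv2f16 operands under an explicit vector length %vl.
declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  i64);

define <vscale x 2 x float> @example_vfwmul(<vscale x 2 x half> %x, <vscale x 2 x half> %y, i64 %vl) {
entry:
  ; Per the CHECK lines above, this is expected to compile to:
  ;   vsetvli {{.*}}, e16,mf2,ta,mu
  ;   vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
  %prod = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16(
      <vscale x 2 x half> %x,
      <vscale x 2 x half> %y,
      i64 %vl)
  ret <vscale x 2 x float> %prod
}

The widening form reads operands at SEW and writes a 2*SEW result, which is why <vscale x 2 x half> inputs yield a <vscale x 2 x float> result here, and why the new patterns use the AllWidenableFloatVectors list rather than AllFloatVectors.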