Author: Craig Topper
Date: 2020-12-29T10:00:04-08:00
New Revision: 2ae760e27e6ad27cf16603e2fa805bec45efc68c
URL: https://github.com/llvm/llvm-project/commit/2ae760e27e6ad27cf16603e2fa805bec45efc68c
DIFF: https://github.com/llvm/llvm-project/commit/2ae760e27e6ad27cf16603e2fa805bec45efc68c.diff

LOG: [RISCV] Add earlyclobber of destination register to vmsbf.m/vmsif.m/vmsof.m instructions

The spec for these instructions includes this note:

"The destination register cannot overlap either the source register
or the mask register ('v0') if the instruction is masked."

So we need earlyclobber to enforce this constraint.

I've regenerated the tests with update_llc_test_checks.py to show the
effects of the earlyclobber.

Reviewed By: khchen, frasercrmck

Differential Revision: https://reviews.llvm.org/D93867

Added: 


Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll

Removed: 



################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 0068b5af2a0e..b50109eecac0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -820,11 +820,12 @@ multiclass VPseudoUnaryS_M {
 }
 
 multiclass VPseudoUnaryM_M {
+  defvar constraint = "@earlyclobber $rd";
   foreach mti = AllMasks in {
     let VLMul = mti.LMul.value in {
-      def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR>;
-      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR>;
+      def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR, constraint>;
+      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>;
     }
   }
 }
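For reference, the constraint string above ends up in the pseudo instruction's
Constraints field, which is what the register allocator enforces. A minimal
sketch of that plumbing, assuming a simplified class body (the real
VPseudoUnaryNoMask/VPseudoUnaryMask in RISCVInstrInfoVPseudos.td carry
additional mask, merge, vl, and sew operands):

  // Simplified illustration only, not the actual class definition.
  class VPseudoUnaryNoMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs RetClass:$rd),
               (ins OpClass:$rs2, GPR:$vl, ixlenimm:$sew), []> {
    // "@earlyclobber $rd" tells the register allocator that $rd is written
    // before the inputs are read, so $rd can never share a register with
    // $rs2 (or with v0 when the masked variant passes the same string).
    let Constraints = Constraint;
  }

This is why the regenerated CHECK lines below compute the result in a scratch
register and copy it back (vmsbf.m v25, v0 followed by vmv1r.v v0, v25)
instead of writing the source or mask register in place.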
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
index b0ee5ab3a27f..8be6aab802d8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
@@ -5,10 +6,13 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
   i32);
 
 define <vscale x 1 x i1> @intrinsic_vmsbf_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vmsbf.m v25, v0
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1
-; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu
-; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
     <vscale x 1 x i1> %0,
     i32 %1)
@@ -22,10 +26,15 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
   i32);
 
 define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv1r.v v0, v17
+; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1
-; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu
-; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
@@ -39,10 +48,13 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
   i32);
 
 define <vscale x 2 x i1> @intrinsic_vmsbf_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vmsbf.m v25, v0
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1
-; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu
-; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
     <vscale x 2 x i1> %0,
     i32 %1)
@@ -56,10 +68,15 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
   i32);
 
 define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv1r.v v0, v17
+; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1
-; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu
-; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
@@ -73,10 +90,13 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
   i32);
 
 define <vscale x 4 x i1> @intrinsic_vmsbf_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vmsbf.m v25, v0
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1
-; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu
-; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
     <vscale x 4 x i1> %0,
     i32 %1)
@@ -90,10 +110,15 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
   i32);
 
 define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv1r.v v0, v17
+; CHECK-NEXT:    vmsbf.m v25, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1
-; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu
-; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
@@ -107,10 +132,13 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
   i32);
 
 define <vscale x 8 x i1> @intrinsic_vmsbf_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vmsbf.m v25, v0
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
-; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1
-; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu
-; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
     <vscale x 8 x i1> %0,
     i32 %1)
@@ -124,10 +152,15 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
   i32);
 
 define <vscale x 8 x i1>
@intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsbf.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1( <vscale x 8 x i1> %0, <vscale x 8 x i1> %1, @@ -141,10 +174,13 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1( i32); define <vscale x 16 x i1> @intrinsic_vmsbf_m_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsbf.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1( <vscale x 16 x i1> %0, i32 %1) @@ -158,10 +194,15 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1( i32); define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsbf.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1( <vscale x 16 x i1> %0, <vscale x 16 x i1> %1, @@ -175,10 +216,13 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1( i32); define <vscale x 32 x i1> @intrinsic_vmsbf_m_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsbf.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1( <vscale x 32 x i1> %0, i32 %1) @@ -192,10 +236,15 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1( i32); define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsbf.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1( <vscale x 32 x i1> %0, <vscale x 32 x i1> %1, @@ -209,10 +258,13 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1( i32); define <vscale x 64 x i1> 
@intrinsic_vmsbf_m_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vmsbf.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1 -; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1( <vscale x 64 x i1> %0, i32 %1) @@ -226,10 +278,15 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1( i32); define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsbf.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1 -; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1( <vscale x 64 x i1> %0, <vscale x 64 x i1> %1, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll index 3dce4a537523..e956d703518d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1( @@ -5,10 +6,13 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1( i64); define <vscale x 1 x i1> @intrinsic_vmsbf_m_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsbf.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1( <vscale x 1 x i1> %0, i64 %1) @@ -22,10 +26,15 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1( i64); define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsbf.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1( <vscale x 1 x i1> %0, <vscale x 1 x i1> %1, @@ -39,10 +48,13 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1( i64); define <vscale x 2 x i1> @intrinsic_vmsbf_m_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsbf.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: 
intrinsic_vmsbf_m_nxv2i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1( <vscale x 2 x i1> %0, i64 %1) @@ -56,10 +68,15 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1( i64); define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsbf.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1( <vscale x 2 x i1> %0, <vscale x 2 x i1> %1, @@ -73,10 +90,13 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1( i64); define <vscale x 4 x i1> @intrinsic_vmsbf_m_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsbf.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1( <vscale x 4 x i1> %0, i64 %1) @@ -90,10 +110,15 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1( i64); define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsbf.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1( <vscale x 4 x i1> %0, <vscale x 4 x i1> %1, @@ -107,10 +132,13 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1( i64); define <vscale x 8 x i1> @intrinsic_vmsbf_m_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsbf.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1( <vscale x 8 x i1> %0, i64 %1) @@ -124,10 +152,15 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1( i64); define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsbf.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu -; CHECK: 
vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1( <vscale x 8 x i1> %0, <vscale x 8 x i1> %1, @@ -141,10 +174,13 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1( i64); define <vscale x 16 x i1> @intrinsic_vmsbf_m_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsbf.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1( <vscale x 16 x i1> %0, i64 %1) @@ -158,10 +194,15 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1( i64); define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsbf.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1( <vscale x 16 x i1> %0, <vscale x 16 x i1> %1, @@ -175,10 +216,13 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1( i64); define <vscale x 32 x i1> @intrinsic_vmsbf_m_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsbf.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1( <vscale x 32 x i1> %0, i64 %1) @@ -192,10 +236,15 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1( i64); define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsbf.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1( <vscale x 32 x i1> %0, <vscale x 32 x i1> %1, @@ -209,10 +258,13 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1( i64); define <vscale x 64 x i1> @intrinsic_vmsbf_m_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vmsbf.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1 -; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1( <vscale x 64 x i1> %0, i64 %1) @@ -226,10 +278,15 @@ 
declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1( i64); define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsbf.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1 -; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu -; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1( <vscale x 64 x i1> %0, <vscale x 64 x i1> %1, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll index 97fca5b2bc4a..521a256f19de 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1( @@ -5,10 +6,13 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1( i32); define <vscale x 1 x i1> @intrinsic_vmsif_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1( <vscale x 1 x i1> %0, i32 %1) @@ -22,10 +26,15 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1( i32); define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1( <vscale x 1 x i1> %0, <vscale x 1 x i1> %1, @@ -39,10 +48,13 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1( i32); define <vscale x 2 x i1> @intrinsic_vmsif_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1( <vscale x 2 x i1> %0, i32 %1) @@ -56,10 +68,15 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1( i32); define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1( <vscale x 2 x i1> %0, <vscale x 2 x i1> %1, @@ -73,10 +90,13 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1( i32); define <vscale x 4 x i1> @intrinsic_vmsif_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1( <vscale x 4 x i1> %0, i32 %1) @@ -90,10 +110,15 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1( i32); define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1( <vscale x 4 x i1> %0, <vscale x 4 x i1> %1, @@ -107,10 +132,13 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1( i32); define <vscale x 8 x i1> @intrinsic_vmsif_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1( <vscale x 8 x i1> %0, i32 %1) @@ -124,10 +152,15 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1( i32); define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1( <vscale x 8 x i1> %0, <vscale x 8 x i1> %1, @@ -141,10 +174,13 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1( i32); define <vscale x 16 x i1> @intrinsic_vmsif_m_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) 
entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1( <vscale x 16 x i1> %0, i32 %1) @@ -158,10 +194,15 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1( i32); define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1( <vscale x 16 x i1> %0, <vscale x 16 x i1> %1, @@ -175,10 +216,13 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1( i32); define <vscale x 32 x i1> @intrinsic_vmsif_m_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1( <vscale x 32 x i1> %0, i32 %1) @@ -192,10 +236,15 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1( i32); define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1( <vscale x 32 x i1> %0, <vscale x 32 x i1> %1, @@ -209,10 +258,13 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1( i32); define <vscale x 64 x i1> @intrinsic_vmsif_m_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1 -; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1( <vscale x 64 x i1> %0, i32 %1) @@ -226,10 +278,15 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1( i32); define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: 
intrinsic_vmsif_mask_m_nxv64i1 -; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1( <vscale x 64 x i1> %0, <vscale x 64 x i1> %1, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll index 280509a63fe4..98c881cbaec3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1( @@ -5,10 +6,13 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1( i64); define <vscale x 1 x i1> @intrinsic_vmsif_m_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1( <vscale x 1 x i1> %0, i64 %1) @@ -22,10 +26,15 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1( i64); define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1( <vscale x 1 x i1> %0, <vscale x 1 x i1> %1, @@ -39,10 +48,13 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1( i64); define <vscale x 2 x i1> @intrinsic_vmsif_m_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1( <vscale x 2 x i1> %0, i64 %1) @@ -56,10 +68,15 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1( i64); define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1( <vscale x 2 x i1> %0, <vscale x 2 x i1> %1, @@ -73,10 +90,13 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1( i64); define <vscale x 4 
x i1> @intrinsic_vmsif_m_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1( <vscale x 4 x i1> %0, i64 %1) @@ -90,10 +110,15 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1( i64); define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1( <vscale x 4 x i1> %0, <vscale x 4 x i1> %1, @@ -107,10 +132,13 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1( i64); define <vscale x 8 x i1> @intrinsic_vmsif_m_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1( <vscale x 8 x i1> %0, i64 %1) @@ -124,10 +152,15 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1( i64); define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1( <vscale x 8 x i1> %0, <vscale x 8 x i1> %1, @@ -141,10 +174,13 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1( i64); define <vscale x 16 x i1> @intrinsic_vmsif_m_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1( <vscale x 16 x i1> %0, i64 %1) @@ -158,10 +194,15 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1( i64); define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1( <vscale x 16 x i1> %0, <vscale x 16 x i1> %1, @@ -175,10 +216,13 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1( i64); define <vscale x 32 x i1> @intrinsic_vmsif_m_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1( <vscale x 32 x i1> %0, i64 %1) @@ -192,10 +236,15 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1( i64); define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1( <vscale x 32 x i1> %0, <vscale x 32 x i1> %1, @@ -209,10 +258,13 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1( i64); define <vscale x 64 x i1> @intrinsic_vmsif_m_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vmsif.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1 -; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1( <vscale x 64 x i1> %0, i64 %1) @@ -226,10 +278,15 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1( i64); define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsif.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1 -; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu -; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1( <vscale x 64 x i1> %0, <vscale x 64 x i1> %1, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll index 8fa635bf3fe1..c2a897197f75 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1( @@ -5,10 +6,13 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1( i32); define <vscale x 1 x i1> @intrinsic_vmsof_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1( <vscale x 1 x i1> %0, i32 %1) @@ -22,10 +26,15 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1( i32); define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1( <vscale x 1 x i1> %0, <vscale x 1 x i1> %1, @@ -39,10 +48,13 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1( i32); define <vscale x 2 x i1> @intrinsic_vmsof_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1( <vscale x 2 x i1> %0, i32 %1) @@ -56,10 +68,15 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1( i32); define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1( <vscale x 2 x i1> %0, <vscale x 2 x i1> %1, @@ -73,10 +90,13 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1( i32); define <vscale x 4 x i1> @intrinsic_vmsof_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1( <vscale x 4 x i1> %0, i32 %1) @@ -90,10 +110,15 @@ 
declare <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1( i32); define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1( <vscale x 4 x i1> %0, <vscale x 4 x i1> %1, @@ -107,10 +132,13 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1( i32); define <vscale x 8 x i1> @intrinsic_vmsof_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1( <vscale x 8 x i1> %0, i32 %1) @@ -124,10 +152,15 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1( i32); define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1( <vscale x 8 x i1> %0, <vscale x 8 x i1> %1, @@ -141,10 +174,13 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1( i32); define <vscale x 16 x i1> @intrinsic_vmsof_m_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1( <vscale x 16 x i1> %0, i32 %1) @@ -158,10 +194,15 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1( i32); define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1( <vscale x 16 x i1> %0, <vscale x 16 x i1> %1, @@ -175,10 +216,13 @@ declare <vscale x 32 x i1> 
@llvm.riscv.vmsof.nxv32i1( i32); define <vscale x 32 x i1> @intrinsic_vmsof_m_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1( <vscale x 32 x i1> %0, i32 %1) @@ -192,10 +236,15 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1( i32); define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1( <vscale x 32 x i1> %0, <vscale x 32 x i1> %1, @@ -209,10 +258,13 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1( i32); define <vscale x 64 x i1> @intrinsic_vmsof_m_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1 -; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1( <vscale x 64 x i1> %0, i32 %1) @@ -226,10 +278,15 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1( i32); define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1 -; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1( <vscale x 64 x i1> %0, <vscale x 64 x i1> %1, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll index fab86d873002..83774254f337 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1( @@ -5,10 +6,13 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1( i64); define <vscale x 1 x i1> @intrinsic_vmsof_m_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; 
CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1( <vscale x 1 x i1> %0, i64 %1) @@ -22,10 +26,15 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1( i64); define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1( <vscale x 1 x i1> %0, <vscale x 1 x i1> %1, @@ -39,10 +48,13 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1( i64); define <vscale x 2 x i1> @intrinsic_vmsof_m_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1( <vscale x 2 x i1> %0, i64 %1) @@ -56,10 +68,15 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1( i64); define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1( <vscale x 2 x i1> %0, <vscale x 2 x i1> %1, @@ -73,10 +90,13 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1( i64); define <vscale x 4 x i1> @intrinsic_vmsof_m_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1( <vscale x 4 x i1> %0, i64 %1) @@ -90,10 +110,15 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1( i64); define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: 
intrinsic_vmsof_mask_m_nxv4i1 -; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1( <vscale x 4 x i1> %0, <vscale x 4 x i1> %1, @@ -107,10 +132,13 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1( i64); define <vscale x 8 x i1> @intrinsic_vmsof_m_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1( <vscale x 8 x i1> %0, i64 %1) @@ -124,10 +152,15 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1( i64); define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1 -; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1( <vscale x 8 x i1> %0, <vscale x 8 x i1> %1, @@ -141,10 +174,13 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1( i64); define <vscale x 16 x i1> @intrinsic_vmsof_m_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1( <vscale x 16 x i1> %0, i64 %1) @@ -158,10 +194,15 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1( i64); define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1 -; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1( <vscale x 16 x i1> %0, <vscale x 16 x i1> %1, @@ -175,10 +216,13 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1( i64); define <vscale x 32 x i1> @intrinsic_vmsof_m_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1( 
<vscale x 32 x i1> %0, i64 %1) @@ -192,10 +236,15 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1( i64); define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1 -; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1( <vscale x 32 x i1> %0, <vscale x 32 x i1> %1, @@ -209,10 +258,13 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1( i64); define <vscale x 64 x i1> @intrinsic_vmsof_m_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vmsof.m v25, v0 +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1 -; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1( <vscale x 64 x i1> %0, i64 %1) @@ -226,10 +278,15 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1( i64); define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v0, v17 +; CHECK-NEXT: vmsof.m v25, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) entry: -; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1 -; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu -; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1( <vscale x 64 x i1> %0, <vscale x 64 x i1> %1, _______________________________________________ llvm-branch-commits mailing list llvm-branch-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits