llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-llvm-ir
@llvm/pr-subscribers-clang-codegen

Author: None (CarolineConcatto)

<details>
<summary>Changes</summary>

This patch renames the following intrinsics:

```
svst1uwq[_{d}]       replaced by svst1wq[_{d}]
svst1uwq_vnum[_{d}]  replaced by svst1wq_vnum[_{d}]
svst1udq[_{d}]       replaced by svst1dq[_{d}]
svst1udq_vnum[_{d}]  replaced by svst1dq_vnum[_{d}]
```

The 'u' is dropped from the quadword stores because they simply truncate the quadwords (to 32 bits for svst1wq, to 64 bits for svst1dq).

```
svextq_lane[_{d}]  replaced by svextq[_{d}]
```

EXTQ now follows the naming of the previously defined EXT intrinsics.

```
svdot[_{d}_{2}_{3}]  replaced by svdot[_{d}_{2}]
```

Introduced with the latest SME2 ACLE change:
[1] https://github.com/ARM-software/acle/pull/257

(A short usage sketch of the new spellings follows the diff below.)

---

Patch is 69.49 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/76844.diff

11 Files Affected:

- (modified) clang/include/clang/Basic/arm_sve.td (+11-11)
- (modified) clang/lib/CodeGen/CGBuiltin.cpp (+2-2)
- (modified) clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_dot.c (+6-6)
- (modified) clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_extq.c (+72-72)
- (modified) clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_st1_single.c (+72-72)
- (modified) clang/test/Sema/aarch64-sve2p1-intrinsics/acle_sve2p1_imm.cpp (+5-5)
- (modified) llvm/include/llvm/IR/IntrinsicsAArch64.td (+3-3)
- (modified) llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp (+2-2)
- (modified) llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td (+7-7)
- (modified) llvm/test/CodeGen/AArch64/sve2p1-intrinsics-extq.ll (+16-16)
- (modified) llvm/test/CodeGen/AArch64/sve2p1-intrinsics-st1-single.ll (+38-38)


``````````diff
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index 91f62c4c76339d..96e5e37d31008b 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -454,11 +454,11 @@ let TargetGuard = "sve,bf16" in {
 let TargetGuard = "sve2p1" in {
   // Contiguous truncating store from quadword (single vector).
-  def SVST1UWQ : MInst<"svst1uwq[_{d}]", "vPcd", "iUif", [IsStore], MemEltTyInt32, "aarch64_sve_st1uwq">;
-  def SVST1UWQ_VNUM : MInst<"svst1uwq_vnum[_{d}]", "vPcld", "iUif", [IsStore], MemEltTyInt32, "aarch64_sve_st1uwq">;
+  def SVST1UWQ : MInst<"svst1wq[_{d}]", "vPcd", "iUif", [IsStore], MemEltTyInt32, "aarch64_sve_st1wq">;
+  def SVST1UWQ_VNUM : MInst<"svst1wq_vnum[_{d}]", "vPcld", "iUif", [IsStore], MemEltTyInt32, "aarch64_sve_st1wq">;
 
-  def SVST1UDQ : MInst<"svst1udq[_{d}]", "vPcd", "lUld", [IsStore], MemEltTyInt64, "aarch64_sve_st1udq">;
-  def SVST1UDQ_VNUM : MInst<"svst1udq_vnum[_{d}]", "vPcld", "lUld", [IsStore], MemEltTyInt64, "aarch64_sve_st1udq">;
+  def SVST1UDQ : MInst<"svst1dq[_{d}]", "vPcd", "lUld", [IsStore], MemEltTyInt64, "aarch64_sve_st1dq">;
+  def SVST1UDQ_VNUM : MInst<"svst1dq_vnum[_{d}]", "vPcld", "lUld", [IsStore], MemEltTyInt64, "aarch64_sve_st1dq">;
 
   // Store one vector (vector base + scalar offset)
   def SVST1Q_SCATTER_U64BASE_OFFSET : MInst<"svst1q_scatter[_{2}base]_offset[_{d}]", "vPgld", "cUcsUsiUilUlfhdb", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_st1q_scatter_scalar_offset">;
@@ -2043,12 +2043,12 @@ let TargetGuard = "sve2p1|sme2" in {
 }
 
 let TargetGuard = "sve2p1" in {
-def SVDOT_X2_S : SInst<"svdot[_{d}_{2}_{3}]", "ddhh", "i", MergeNone, "aarch64_sve_sdot_x2", [], []>;
-def SVDOT_X2_U : SInst<"svdot[_{d}_{2}_{3}]", "ddhh", "Ui", MergeNone, "aarch64_sve_udot_x2", [], []>;
-def SVDOT_X2_F : SInst<"svdot[_{d}_{2}_{3}]", "ddhh", "f", MergeNone, "aarch64_sve_fdot_x2", [], []>;
-def SVDOT_LANE_X2_S : SInst<"svdot_lane[_{d}_{2}_{3}]", "ddhhi", "i", MergeNone, "aarch64_sve_sdot_lane_x2", [], [ImmCheck<3, ImmCheck0_3>]>;
-def SVDOT_LANE_X2_U : SInst<"svdot_lane[_{d}_{2}_{3}]", "ddhhi", "Ui", MergeNone, "aarch64_sve_udot_lane_x2", [], [ImmCheck<3, ImmCheck0_3>]>;
-def SVDOT_LANE_X2_F : SInst<"svdot_lane[_{d}_{2}_{3}]", "ddhhi", "f", MergeNone, "aarch64_sve_fdot_lane_x2", [], [ImmCheck<3, ImmCheck0_3>]>;
+def SVDOT_X2_S : SInst<"svdot[_{d}_{2}]", "ddhh", "i", MergeNone, "aarch64_sve_sdot_x2", [], []>;
+def SVDOT_X2_U : SInst<"svdot[_{d}_{2}]", "ddhh", "Ui", MergeNone, "aarch64_sve_udot_x2", [], []>;
+def SVDOT_X2_F : SInst<"svdot[_{d}_{2}]", "ddhh", "f", MergeNone, "aarch64_sve_fdot_x2", [], []>;
+def SVDOT_LANE_X2_S : SInst<"svdot_lane[_{d}_{2}]", "ddhhi", "i", MergeNone, "aarch64_sve_sdot_lane_x2", [], [ImmCheck<3, ImmCheck0_3>]>;
+def SVDOT_LANE_X2_U : SInst<"svdot_lane[_{d}_{2}]", "ddhhi", "Ui", MergeNone, "aarch64_sve_udot_lane_x2", [], [ImmCheck<3, ImmCheck0_3>]>;
+def SVDOT_LANE_X2_F : SInst<"svdot_lane[_{d}_{2}]", "ddhhi", "f", MergeNone, "aarch64_sve_fdot_lane_x2", [], [ImmCheck<3, ImmCheck0_3>]>;
 }
 
 let TargetGuard = "sve2p1|sme" in {
@@ -2212,7 +2212,7 @@ let TargetGuard = "sve2p1" in {
   def SVTBLQ : SInst<"svtblq[_{d}]", "ddu", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_tblq">;
   def SVTBXQ : SInst<"svtbxq[_{d}]", "dddu", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_tbxq">;
   // EXTQ
-  def EXTQ : SInst<"svextq_lane[_{d}]", "dddk", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_extq_lane", [], [ImmCheck<2, ImmCheck0_15>]>;
+  def EXTQ : SInst<"svextq[_{d}]", "dddk", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_extq", [], [ImmCheck<2, ImmCheck0_15>]>;
   // PMOV
   // Move to Pred
   multiclass PMOV_TO_PRED<string name, string types, string intrinsic, list<FlagType> flags=[], ImmCheckType immCh > {
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index f71dbf1729a1d6..1ed35befe1361f 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -9681,8 +9681,8 @@ Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
   bool IsQuadStore = false;
 
   switch (IntrinsicID) {
-  case Intrinsic::aarch64_sve_st1uwq:
-  case Intrinsic::aarch64_sve_st1udq:
+  case Intrinsic::aarch64_sve_st1wq:
+  case Intrinsic::aarch64_sve_st1dq:
     AddrMemoryTy = llvm::ScalableVectorType::get(MemEltTy, 1);
     PredTy = llvm::ScalableVectorType::get(IntegerType::get(getLLVMContext(), 1),
                                            1);
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_dot.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_dot.c
index d01b59114d5429..035ba244f9441e 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_dot.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_dot.c
@@ -26,7 +26,7 @@
 //
 svint32_t test_svdot_s32_x2(svint32_t op1, svint16_t op2, svint16_t op3)
 {
-  return SVE_ACLE_FUNC(svdot,_s32_s16_s16,)(op1, op2, op3);
+  return SVE_ACLE_FUNC(svdot,_s32_s16,)(op1, op2, op3);
 }
 
 // CHECK-LABEL: @test_svdot_u32_x2(
@@ -41,7 +41,7 @@ svint32_t test_svdot_s32_x2(svint32_t op1, svint16_t op2, svint16_t op3)
 //
 svuint32_t test_svdot_u32_x2(svuint32_t op1, svuint16_t op2, svuint16_t op3)
 {
-  return SVE_ACLE_FUNC(svdot,_u32_u16_u16,)(op1, op2, op3);
+  return SVE_ACLE_FUNC(svdot,_u32_u16,)(op1, op2, op3);
 }
 
 // CHECK-LABEL: @test_svdot_f32_x2(
@@ -56,7 +56,7 @@ svuint32_t test_svdot_u32_x2(svuint32_t op1, svuint16_t op2, svuint16_t op3)
 //
 svfloat32_t test_svdot_f32_x2(svfloat32_t op1, svfloat16_t op2, svfloat16_t op3)
 {
-  return SVE_ACLE_FUNC(svdot,_f32_f16_f16,)(op1, op2, op3);
+  return SVE_ACLE_FUNC(svdot,_f32_f16,)(op1, op2, op3);
 }
 
 
@@ -73,7 +73,7 @@ svfloat32_t test_svdot_f32_x2(svfloat32_t op1, svfloat16_t op2, svfloat16_t op3)
 //
 svint32_t test_svdot_lane_s32_x2(svint32_t op1, svint16_t op2, svint16_t op3)
 {
-  return SVE_ACLE_FUNC(svdot_lane,_s32_s16_s16,)(op1, op2, op3, 3);
+  return SVE_ACLE_FUNC(svdot_lane,_s32_s16,)(op1, op2, op3, 3);
 }
 
 // CHECK-LABEL: @test_svdot_lane_u32_x2(
@@ -88,7 +88,7 @@ svint32_t test_svdot_lane_s32_x2(svint32_t op1, svint16_t op2, svint16_t op3)
 //
 svuint32_t test_svdot_lane_u32_x2(svuint32_t op1, svuint16_t op2, svuint16_t op3)
 {
-  return SVE_ACLE_FUNC(svdot_lane,_u32_u16_u16,)(op1, op2, op3, 3);
+  return SVE_ACLE_FUNC(svdot_lane,_u32_u16,)(op1, op2, op3, 3);
 }
 
 // CHECK-LABEL: @test_svdot_lane_f32_x2(
@@ -103,5 +103,5 @@ svuint32_t test_svdot_lane_u32_x2(svuint32_t op1, svuint16_t op2, svuint16_t op3
 //
 svfloat32_t test_svdot_lane_f32_x2(svfloat32_t op1, svfloat16_t op2, svfloat16_t op3)
 {
-  return SVE_ACLE_FUNC(svdot_lane,_f32_f16_f16,)(op1, op2, op3, 3);
+  return SVE_ACLE_FUNC(svdot_lane,_f32_f16,)(op1, op2, op3, 3);
 }
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_extq.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_extq.c
index 7704db5667a2a7..738b290b76cf59 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_extq.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_extq.c
@@ -20,194 +20,194 @@
 #define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
 #endif
 
-// CHECK-LABEL: define dso_local <vscale x 16 x i8> @test_svextq_lane_u8
+// CHECK-LABEL: define dso_local <vscale x 16 x i8> @test_svextq_u8
 // CHECK-SAME: (<vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.extq.lane.nxv16i8(<vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.extq.nxv16i8(<vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0)
 // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
-// CPP-CHECK-LABEL: define dso_local <vscale x 16 x i8> @_Z19test_svextq_lane_u8u11__SVUint8_tS_
+// CPP-CHECK-LABEL: define dso_local <vscale x 16 x i8> @_Z14test_svextq_u8u11__SVUint8_tS_
 // CPP-CHECK-SAME: (<vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]]) #[[ATTR0:[0-9]+]] {
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.extq.lane.nxv16i8(<vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.extq.nxv16i8(<vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0)
 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
-svuint8_t test_svextq_lane_u8(svuint8_t zn, svuint8_t zm) {
-  return SVE_ACLE_FUNC(svextq_lane, _u8,,)(zn, zm, 0);
+svuint8_t test_svextq_u8(svuint8_t zn, svuint8_t zm) {
+  return SVE_ACLE_FUNC(svextq, _u8,,)(zn, zm, 0);
 }
 
-// CHECK-LABEL: define dso_local <vscale x 16 x i8> @test_svextq_lane_s8
+// CHECK-LABEL: define dso_local <vscale x 16 x i8> @test_svextq_s8
 // CHECK-SAME: (<vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.extq.lane.nxv16i8(<vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 4)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.extq.nxv16i8(<vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 4)
 // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
-// CPP-CHECK-LABEL: define dso_local <vscale x 16 x i8> @_Z19test_svextq_lane_s8u10__SVInt8_tS_
+// CPP-CHECK-LABEL: define dso_local <vscale x 16 x i8> @_Z14test_svextq_s8u10__SVInt8_tS_
 // CPP-CHECK-SAME: (<vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]]) #[[ATTR0]] {
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.extq.lane.nxv16i8(<vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 4)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.extq.nxv16i8(<vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 4)
 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
 //
-svint8_t test_svextq_lane_s8(svint8_t zn, svint8_t zm) {
-  return SVE_ACLE_FUNC(svextq_lane, _s8,,)(zn, zm, 4);
+svint8_t test_svextq_s8(svint8_t zn, svint8_t zm) {
+  return SVE_ACLE_FUNC(svextq, _s8,,)(zn, zm, 4);
 }
 
-// CHECK-LABEL: define dso_local <vscale x 8 x i16> @test_svextq_lane_u16
+// CHECK-LABEL: define dso_local <vscale x 8 x i16> @test_svextq_u16
 // CHECK-SAME: (<vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.extq.lane.nxv8i16(<vscale x 8 x i16> [[ZN]], <vscale x 8 x i16> [[ZM]], i32 1)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.extq.nxv8i16(<vscale x 8 x i16> [[ZN]], <vscale x 8 x i16> [[ZM]], i32 1)
 // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
-// CPP-CHECK-LABEL: define dso_local <vscale x 8 x i16> @_Z20test_svextq_lane_u16u12__SVUint16_tS_
+// CPP-CHECK-LABEL: define dso_local <vscale x 8 x i16> @_Z15test_svextq_u16u12__SVUint16_tS_
 // CPP-CHECK-SAME: (<vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]]) #[[ATTR0]] {
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.extq.lane.nxv8i16(<vscale x 8 x i16> [[ZN]], <vscale x 8 x i16> [[ZM]], i32 1)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.extq.nxv8i16(<vscale x 8 x i16> [[ZN]], <vscale x 8 x i16> [[ZM]], i32 1)
 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
-svuint16_t test_svextq_lane_u16(svuint16_t zn, svuint16_t zm) {
-  return SVE_ACLE_FUNC(svextq_lane, _u16,,)(zn, zm, 1);
+svuint16_t test_svextq_u16(svuint16_t zn, svuint16_t zm) {
+  return SVE_ACLE_FUNC(svextq, _u16,,)(zn, zm, 1);
 }
 
-// CHECK-LABEL: define dso_local <vscale x 8 x i16> @test_svextq_lane_s16
+// CHECK-LABEL: define dso_local <vscale x 8 x i16> @test_svextq_s16
 // CHECK-SAME: (<vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.extq.lane.nxv8i16(<vscale x 8 x i16> [[ZN]], <vscale x 8 x i16> [[ZM]], i32 5)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.extq.nxv8i16(<vscale x 8 x i16> [[ZN]], <vscale x 8 x i16> [[ZM]], i32 5)
 // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
-// CPP-CHECK-LABEL: define dso_local <vscale x 8 x i16> @_Z20test_svextq_lane_s16u11__SVInt16_tS_
+// CPP-CHECK-LABEL: define dso_local <vscale x 8 x i16> @_Z15test_svextq_s16u11__SVInt16_tS_
 // CPP-CHECK-SAME: (<vscale x 8 x i16> [[ZN:%.*]], <vscale x 8 x i16> [[ZM:%.*]]) #[[ATTR0]] {
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.extq.lane.nxv8i16(<vscale x 8 x i16> [[ZN]], <vscale x 8 x i16> [[ZM]], i32 5)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.extq.nxv8i16(<vscale x 8 x i16> [[ZN]], <vscale x 8 x i16> [[ZM]], i32 5)
 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
 //
-svint16_t test_svextq_lane_s16(svint16_t zn, svint16_t zm) {
-  return SVE_ACLE_FUNC(svextq_lane, _s16,,)(zn, zm, 5);
+svint16_t test_svextq_s16(svint16_t zn, svint16_t zm) {
+  return SVE_ACLE_FUNC(svextq, _s16,,)(zn, zm, 5);
 }
 
-// CHECK-LABEL: define dso_local <vscale x 4 x i32> @test_svextq_lane_u32
+// CHECK-LABEL: define dso_local <vscale x 4 x i32> @test_svextq_u32
 // CHECK-SAME: (<vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> [[ZM:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.extq.lane.nxv4i32(<vscale x 4 x i32> [[ZN]], <vscale x 4 x i32> [[ZM]], i32 2)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.extq.nxv4i32(<vscale x 4 x i32> [[ZN]], <vscale x 4 x i32> [[ZM]], i32 2)
 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
-// CPP-CHECK-LABEL: define dso_local <vscale x 4 x i32> @_Z20test_svextq_lane_u32u12__SVUint32_tS_
+// CPP-CHECK-LABEL: define dso_local <vscale x 4 x i32> @_Z15test_svextq_u32u12__SVUint32_tS_
 // CPP-CHECK-SAME: (<vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> [[ZM:%.*]]) #[[ATTR0]] {
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.extq.lane.nxv4i32(<vscale x 4 x i32> [[ZN]], <vscale x 4 x i32> [[ZM]], i32 2)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.extq.nxv4i32(<vscale x 4 x i32> [[ZN]], <vscale x 4 x i32> [[ZM]], i32 2)
 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
-svuint32_t test_svextq_lane_u32(svuint32_t zn, svuint32_t zm) {
-  return SVE_ACLE_FUNC(svextq_lane, _u32,,)(zn, zm, 2);
+svuint32_t test_svextq_u32(svuint32_t zn, svuint32_t zm) {
+  return SVE_ACLE_FUNC(svextq, _u32,,)(zn, zm, 2);
 }
 
-// CHECK-LABEL: define dso_local <vscale x 4 x i32> @test_svextq_lane_s32
+// CHECK-LABEL: define dso_local <vscale x 4 x i32> @test_svextq_s32
 // CHECK-SAME: (<vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> [[ZM:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.extq.lane.nxv4i32(<vscale x 4 x i32> [[ZN]], <vscale x 4 x i32> [[ZM]], i32 6)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.extq.nxv4i32(<vscale x 4 x i32> [[ZN]], <vscale x 4 x i32> [[ZM]], i32 6)
 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
-// CPP-CHECK-LABEL: define dso_local <vscale x 4 x i32> @_Z20test_svextq_lane_s32u11__SVInt32_tS_
+// CPP-CHECK-LABEL: define dso_local <vscale x 4 x i32> @_Z15test_svextq_s32u11__SVInt32_tS_
 // CPP-CHECK-SAME: (<vscale x 4 x i32> [[ZN:%.*]], <vscale x 4 x i32> [[ZM:%.*]]) #[[ATTR0]] {
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.extq.lane.nxv4i32(<vscale x 4 x i32> [[ZN]], <vscale x 4 x i32> [[ZM]], i32 6)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.extq.nxv4i32(<vscale x 4 x i32> [[ZN]], <vscale x 4 x i32> [[ZM]], i32 6)
 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
-svint32_t test_svextq_lane_s32(svint32_t zn, svint32_t zm) {
-  return SVE_ACLE_FUNC(svextq_lane, _s32,,)(zn, zm, 6);
+svint32_t test_svextq_s32(svint32_t zn, svint32_t zm) {
+  return SVE_ACLE_FUNC(svextq, _s32,,)(zn, zm, 6);
 }
 
-// CHECK-LABEL: define dso_local <vscale x 2 x i64> @test_svextq_lane_u64
+// CHECK-LABEL: define dso_local <vscale x 2 x i64> @test_svextq_u64
 // CHECK-SAME: (<vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> [[ZM:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.extq.lane.nxv2i64(<vscale x 2 x i64> [[ZN]], <vscale x 2 x i64> [[ZM]], i32 3)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.extq.nxv2i64(<vscale x 2 x i64> [[ZN]], <vscale x 2 x i64> [[ZM]], i32 3)
 // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
-// CPP-CHECK-LABEL: define dso_local <vscale x 2 x i64> @_Z20test_svextq_lane_u64u12__SVUint64_tS_
+// CPP-CHECK-LABEL: define dso_local <vscale x 2 x i64> @_Z15test_svextq_u64u12__SVUint64_tS_
// CPP-CHECK-SAME: (<vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> [[ZM:%.*]]) #[[ATTR0]] {
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.extq.lane.nxv2i64(<vscale x 2 x i64> [[ZN]], <vscale x 2 x i64> [[ZM]], i32 3)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.extq.nxv2i64(<vscale x 2 x i64> [[ZN]], <vscale x 2 x i64> [[ZM]], i32 3)
 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
-svuint64_t test_svextq_lane_u64(svuint64_t zn, svuint64_t zm) {
-  return SVE_ACLE_FUNC(svextq_lane, _u64,,)(zn, zm, 3);
+svuint64_t test_svextq_u64(svuint64_t zn, svuint64_t zm) {
+  return SVE_ACLE_FUNC(svextq, _u64,,)(zn, zm, 3);
 }
 
-// CHECK-LABEL: define dso_local <vscale x 2 x i64> @test_svextq_lane_s64
+// CHECK-LABEL: define dso_local <vscale x 2 x i64> @test_svextq_s64
 // CHECK-SAME: (<vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> [[ZM:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.extq.lane.nxv2i64(<vscale x 2 x i64> [[ZN]], <vscale x 2 x i64> [[ZM]], i32 7)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.extq.nxv2i64(<vscale x 2 x i64> [[ZN]], <vscale x 2 x i64> [[ZM]], i32 7)
 // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
-// CPP-CHECK-LABEL: define dso_local <vscale x 2 x i64> @_Z20test_svextq_lane_s64u11__SVInt64_tS_
+// CPP-CHECK-LABEL: define dso_local <vscale x 2 x i64> @_Z15test_svextq_s64u11__SVInt64_tS_
 // CPP-CHECK-SAME: (<vscale x 2 x i64> [[ZN:%.*]], <vscale x 2 x i64> [[ZM:%.*]]) #[[ATTR0]] {
 // CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.extq.lane.nxv2i64(<vscale x 2 x i64> [[ZN]], <vscale x 2 x i64> [[ZM]], i32 7)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.extq.nxv2i64(<vscale x 2 x i64> [[ZN]], <vscale x 2 x i64> [[ZM]], i32 7)
 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
-svint64_t test_svextq_lane_s64(svint64_t zn, svint64_t zm) {
-  return SVE_ACLE_FUNC(svextq_lane, _s64,,)(zn, zm, 7);
+svint64_t test_svextq_s64(svint64_t zn, svint64_t zm) {
+  return SVE_ACLE_FUNC(svextq, _s64,,)(zn, zm, 7);
 }
 
-// CHECK-LABEL: define dso_local <vscale x 8 x half> @test_svextq_lane_f16
+// CHECK-LABEL: define dso_local <vscale x 8 x half> @test_svextq_f16
 // CHECK-SAME: (<vscale x 8 x half> [[ZN:%.*]], <vscale x 8 x half> [[ZM:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.extq.lane.nxv8f16(<vscale x 8 x half> [[ZN]], <vscale x 8 x half> [[ZM]], i32 8)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.extq.nxv8f16(<vscale x 8 x half> [[ZN]], <vscale x 8 x half> [[ZM]], i32 8)
 // CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
 //
-// CPP-CHECK-LABEL: define dso_local <vscale x 8 x half> @_Z20test_svextq_lane_f16u13__SVFloat16_tS_
+//... [truncated]
``````````

</details>

https://github.com/llvm/llvm-project/pull/76844
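For anyone updating sources against this rename, here is a minimal usage sketch of the new spellings, mirroring the tests above. The wrapper function names and the build flag are illustrative assumptions, not part of the patch; it presumes a compiler that already contains this change, with SVE2.1 enabled (e.g. `-march=armv9-a+sve2p1`).

```c
// Hypothetical wrappers showing the renamed SVE2.1 ACLE intrinsics.
#include <arm_sve.h>

// Truncating quadword store: svst1uwq_s32 is now svst1wq_s32
// (svst1udq_s64 likewise becomes svst1dq_s64).
void store_words(svbool_t pg, int32_t *base, svint32_t zt) {
  svst1wq_s32(pg, base, zt);
}

// Quadword extract: svextq_lane_u8 is now svextq_u8; the index
// must still be a constant in [0, 15].
svuint8_t extract_bytes(svuint8_t zn, svuint8_t zm) {
  return svextq_u8(zn, zm, 4);
}

// Two-way dot product: the redundant third type suffix is dropped,
// so svdot_s32_s16_s16 becomes svdot_s32_s16 (same for svdot_lane).
svint32_t dot_halves(svint32_t acc, svint16_t a, svint16_t b) {
  return svdot_s32_s16(acc, a, b);
}
```

The underlying LLVM intrinsics are renamed the same way (aarch64_sve_st1wq, aarch64_sve_st1dq, aarch64_sve_extq), so out-of-tree IR that calls the old names needs the same update.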