https://github.com/jurahul updated https://github.com/llvm/llvm-project/pull/131942
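For readers skimming the patch below: the new overload lets callers of non-overloaded intrinsics drop the empty `Types` list. A minimal sketch of the call-site change (the `emitTrap` helper is hypothetical; `llvm.trap` takes no overload types, matching the BoundsChecking change further down):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"

  // Before this patch, an empty overload-type list had to be spelled out:
  //   B.CreateIntrinsic(Intrinsic::trap, /*Types=*/{}, /*Args=*/{});
  // With the new overload, Types is omitted and forwarded as {}:
  void emitTrap(llvm::IRBuilderBase &B) {
    B.CreateIntrinsic(llvm::Intrinsic::trap, /*Args=*/{});
  }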
From 321ac988a49489d910bf8ba90a28d05db853cc0d Mon Sep 17 00:00:00 2001
From: Rahul Joshi <rjo...@nvidia.com>
Date: Tue, 18 Mar 2025 13:19:24 -0700
Subject: [PATCH] [IRBuilder] Add new overload for CreateIntrinsic

Add a new `CreateIntrinsic` overload with no `Types`, useful for creating
calls to non-overloaded intrinsics that don't need additional mangling.
---
 clang/lib/CodeGen/CGHLSLRuntime.cpp           |  4 +-
 clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp   |  9 ++--
 llvm/include/llvm/IR/IRBuilder.h              |  8 +++
 llvm/lib/CodeGen/SafeStack.cpp                |  2 +-
 llvm/lib/CodeGen/StackProtector.cpp           |  4 +-
 llvm/lib/IR/AutoUpgrade.cpp                   | 52 +++++++++----------
 .../Target/AArch64/AArch64ISelLowering.cpp    |  4 +-
 .../AArch64/AArch64TargetTransformInfo.cpp    |  2 +-
 .../AMDGPU/AMDGPUAsanInstrumentation.cpp      |  2 +-
 .../Target/AMDGPU/AMDGPUAtomicOptimizer.cpp   | 11 ++--
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    |  2 +-
 .../AMDGPU/AMDGPULowerKernelArguments.cpp     |  2 +-
 .../AMDGPU/AMDGPULowerModuleLDSPass.cpp       |  3 +-
 .../lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp |  6 +--
 llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp   | 18 +++----
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  8 +--
 llvm/lib/Target/ARM/ARMISelLowering.cpp       | 10 ++--
 llvm/lib/Target/ARM/MVETailPredication.cpp    |  2 +-
 llvm/lib/Target/Hexagon/HexagonGenExtract.cpp |  2 +-
 .../Target/Hexagon/HexagonISelLowering.cpp    |  4 +-
 .../Target/Hexagon/HexagonVectorCombine.cpp   |  2 +-
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp   |  2 +-
 llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp |  6 +--
 llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp |  4 +-
 llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp   |  2 +-
 .../Target/X86/X86InstCombineIntrinsic.cpp    |  4 +-
 llvm/lib/Target/X86/X86LowerAMXType.cpp       | 20 +++----
 llvm/lib/Target/X86/X86WinEHState.cpp         |  6 +--
 .../Target/XCore/XCoreLowerThreadLocal.cpp    |  2 +-
 llvm/lib/Transforms/IPO/CrossDSOCFI.cpp       |  2 +-
 .../Instrumentation/BoundsChecking.cpp        |  4 +-
 .../Instrumentation/HWAddressSanitizer.cpp    |  3 +-
 llvm/lib/Transforms/Instrumentation/KCFI.cpp  |  2 +-
 .../Instrumentation/MemorySanitizer.cpp       |  4 +-
 .../Instrumentation/PGOInstrumentation.cpp    |  7 ++-
 .../Instrumentation/ThreadSanitizer.cpp       |  2 +-
 llvm/lib/Transforms/Scalar/SROA.cpp           |  2 +-
 llvm/lib/Transforms/Utils/GuardUtils.cpp      |  2 +-
 llvm/lib/Transforms/Utils/InlineFunction.cpp  |  4 +-
 llvm/unittests/IR/IRBuilderTest.cpp           | 44 +++++++++-------
 llvm/unittests/Transforms/Utils/LocalTest.cpp |  4 +-
 41 files changed, 146 insertions(+), 137 deletions(-)

diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp index 0e859dd4a0b1d..3b1810b62a2cd 100644 --- a/clang/lib/CodeGen/CGHLSLRuntime.cpp +++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp @@ -385,8 +385,8 @@ void CGHLSLRuntime::emitEntryFunction(const FunctionDecl *FD, SmallVector<OperandBundleDef, 1> OB; if (CGM.shouldEmitConvergenceTokens()) { assert(EntryFn->isConvergent()); - llvm::Value *I = B.CreateIntrinsic( - llvm::Intrinsic::experimental_convergence_entry, {}, {}); + llvm::Value *I = + B.CreateIntrinsic(llvm::Intrinsic::experimental_convergence_entry, {}); llvm::Value *bundleArgs[] = {I}; OB.emplace_back("convergencectrl", bundleArgs); } diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp index f94917c905081..632181ff0c46e 100644 --- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp +++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp @@ -241,13 +241,14 @@ static Value *handleHlslClip(const CallExpr *E, CodeGenFunction *CGF) { CMP = CGF->Builder.CreateIntrinsic( CGF->Builder.getInt1Ty(),
CGF->CGM.getHLSLRuntime().getAnyIntrinsic(), {FCompInst}); - } else + } else { CMP = CGF->Builder.CreateFCmpOLT(Op0, FZeroConst); + } - if (CGF->CGM.getTarget().getTriple().isDXIL()) + if (CGF->CGM.getTarget().getTriple().isDXIL()) { LastInstr = CGF->Builder.CreateIntrinsic( CGF->VoidTy, Intrinsic::dx_discard, {CMP}); - else if (CGF->CGM.getTarget().getTriple().isSPIRV()) { + } else if (CGF->CGM.getTarget().getTriple().isSPIRV()) { BasicBlock *LT0 = CGF->createBasicBlock("lt0", CGF->CurFn); BasicBlock *End = CGF->createBasicBlock("end", CGF->CurFn); @@ -255,7 +256,7 @@ static Value *handleHlslClip(const CallExpr *E, CodeGenFunction *CGF) { CGF->Builder.SetInsertPoint(LT0); - CGF->Builder.CreateIntrinsic(CGF->VoidTy, Intrinsic::spv_discard, {}); + CGF->Builder.CreateIntrinsic(Intrinsic::spv_discard, {}); LastInstr = CGF->Builder.CreateBr(End); CGF->Builder.SetInsertPoint(End); diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h index 750a99cc50dd7..07660e93253da 100644 --- a/llvm/include/llvm/IR/IRBuilder.h +++ b/llvm/include/llvm/IR/IRBuilder.h @@ -1004,6 +1004,14 @@ class IRBuilderBase { ArrayRef<Value *> Args, FMFSource FMFSource = {}, const Twine &Name = ""); + /// Create a call to non-overloaded intrinsic \p ID with \p Args. If + /// \p FMFSource is provided, copy fast-math-flags from that instruction to + /// the intrinsic. + CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Value *> Args, + FMFSource FMFSource = {}, const Twine &Name = "") { + return CreateIntrinsic(ID, /*Types=*/{}, Args, FMFSource, Name); + } + /// Create call to the minnum intrinsic. Value *CreateMinNum(Value *LHS, Value *RHS, FMFSource FMFSource = {}, const Twine &Name = "") { diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp index 74e9d945c1885..da229f86f24ce 100644 --- a/llvm/lib/CodeGen/SafeStack.cpp +++ b/llvm/lib/CodeGen/SafeStack.cpp @@ -367,7 +367,7 @@ Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) { if (!StackGuardVar) { TL.insertSSPDeclarations(*M); - return IRB.CreateIntrinsic(Intrinsic::stackguard, {}, {}); + return IRB.CreateIntrinsic(Intrinsic::stackguard, {}); } return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard"); diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp index e823df3186a54..4c8b21d1e29aa 100644 --- a/llvm/lib/CodeGen/StackProtector.cpp +++ b/llvm/lib/CodeGen/StackProtector.cpp @@ -542,7 +542,7 @@ static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M, if (SupportsSelectionDAGSP) *SupportsSelectionDAGSP = true; TLI->insertSSPDeclarations(*M); - return B.CreateIntrinsic(Intrinsic::stackguard, {}, {}); + return B.CreateIntrinsic(Intrinsic::stackguard, {}); } /// Insert code into the entry block that stores the stack guard @@ -563,7 +563,7 @@ static bool CreatePrologue(Function *F, Module *M, Instruction *CheckLoc, AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot"); Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP); - B.CreateIntrinsic(Intrinsic::stackprotector, {}, {GuardSlot, AI}); + B.CreateIntrinsic(Intrinsic::stackprotector, {GuardSlot, AI}); return SupportsSelectionDAGSP; } diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp index 9be307bb071ed..a8d76c2679968 100644 --- a/llvm/lib/IR/AutoUpgrade.cpp +++ b/llvm/lib/IR/AutoUpgrade.cpp @@ -1767,7 +1767,7 @@ static Value *upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI, if (!IndexForm) std::swap(Args[0], Args[1]); - Value *V = 
Builder.CreateIntrinsic(IID, {}, Args); + Value *V = Builder.CreateIntrinsic(IID, Args); Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Builder.CreateBitCast(CI.getArgOperand(1), Ty); @@ -2022,8 +2022,8 @@ static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI, // Replace a masked intrinsic with an older unmasked intrinsic. static Value *upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI, Intrinsic::ID IID) { - Value *Rep = Builder.CreateIntrinsic( - IID, {}, {CI.getArgOperand(0), CI.getArgOperand(1)}); + Value *Rep = + Builder.CreateIntrinsic(IID, {CI.getArgOperand(0), CI.getArgOperand(1)}); return emitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2)); } @@ -2280,7 +2280,7 @@ static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder, SmallVector<Value *, 4> Args(CI.args()); Args.pop_back(); Args.pop_back(); - Rep = Builder.CreateIntrinsic(IID, {}, Args); + Rep = Builder.CreateIntrinsic(IID, Args); unsigned NumArgs = CI.arg_size(); Rep = emitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep, CI.getArgOperand(NumArgs - 2)); @@ -2510,7 +2510,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, : Intrinsic::x86_avx512_sqrt_pd_512; Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(3)}; - Rep = Builder.CreateIntrinsic(IID, {}, Args); + Rep = Builder.CreateIntrinsic(IID, Args); } else { Rep = Builder.CreateIntrinsic(Intrinsic::sqrt, CI->getType(), {CI->getArgOperand(0)}); @@ -2637,8 +2637,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, break; } - Rep = Builder.CreateIntrinsic(IID, {}, - {CI->getOperand(0), CI->getArgOperand(1)}); + Rep = + Builder.CreateIntrinsic(IID, {CI->getOperand(0), CI->getArgOperand(1)}); Rep = applyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); } else if (Name.starts_with("avx512.mask.fpclass.p")) { Type *OpTy = CI->getArgOperand(0)->getType(); @@ -2660,8 +2660,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, else llvm_unreachable("Unexpected intrinsic"); - Rep = Builder.CreateIntrinsic(IID, {}, - {CI->getOperand(0), CI->getArgOperand(1)}); + Rep = + Builder.CreateIntrinsic(IID, {CI->getOperand(0), CI->getArgOperand(1)}); Rep = applyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); } else if (Name.starts_with("avx512.cmp.p")) { SmallVector<Value *, 4> Args(CI->args()); @@ -2689,7 +2689,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, std::swap(Mask, Args.back()); Args.push_back(Mask); - Rep = Builder.CreateIntrinsic(IID, {}, Args); + Rep = Builder.CreateIntrinsic(IID, Args); } else if (Name.starts_with("avx512.mask.cmp.")) { // Integer compare intrinsics. 
unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); @@ -2905,7 +2905,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, } else if (Name == "sse42.crc32.64.8") { Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C)); - Rep = Builder.CreateIntrinsic(Intrinsic::x86_sse42_crc32_32_8, {}, + Rep = Builder.CreateIntrinsic(Intrinsic::x86_sse42_crc32_32_8, {Trunc0, CI->getArgOperand(1)}); Rep = Builder.CreateZExt(Rep, CI->getType(), ""); } else if (Name.starts_with("avx.vbroadcast.s") || @@ -3395,7 +3395,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, IID = Intrinsic::x86_avx512_add_pd_512; Rep = Builder.CreateIntrinsic( - IID, {}, + IID, {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(4)}); } else { Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1)); @@ -3411,7 +3411,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, IID = Intrinsic::x86_avx512_div_pd_512; Rep = Builder.CreateIntrinsic( - IID, {}, + IID, {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(4)}); } else { Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1)); @@ -3427,7 +3427,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, IID = Intrinsic::x86_avx512_mul_pd_512; Rep = Builder.CreateIntrinsic( - IID, {}, + IID, {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(4)}); } else { Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1)); @@ -3443,7 +3443,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, IID = Intrinsic::x86_avx512_sub_pd_512; Rep = Builder.CreateIntrinsic( - IID, {}, + IID, {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(4)}); } else { Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1)); @@ -3461,7 +3461,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble]; Rep = Builder.CreateIntrinsic( - IID, {}, + IID, {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(4)}); Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, CI->getArgOperand(2)); @@ -3759,7 +3759,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, IID = Intrinsic::x86_avx512_vfmadd_f64; else IID = Intrinsic::x86_avx512_vfmadd_f32; - Rep = Builder.CreateIntrinsic(IID, {}, Ops); + Rep = Builder.CreateIntrinsic(IID, Ops); } else { Rep = Builder.CreateFMA(A, B, C); } @@ -3812,7 +3812,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, else IID = Intrinsic::x86_avx512_vfmadd_pd_512; - Rep = Builder.CreateIntrinsic(IID, {}, {A, B, C, CI->getArgOperand(4)}); + Rep = Builder.CreateIntrinsic(IID, {A, B, C, CI->getArgOperand(4)}); } else { Rep = Builder.CreateFMA(A, B, C); } @@ -3840,7 +3840,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, Value *Ops[] = {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2)}; Ops[2] = Builder.CreateFNeg(Ops[2]); - Rep = Builder.CreateIntrinsic(IID, {}, Ops); + Rep = Builder.CreateIntrinsic(IID, Ops); } else if (Name.starts_with("avx512.mask.vfmaddsub.p") || Name.starts_with("avx512.mask3.vfmaddsub.p") || Name.starts_with("avx512.maskz.vfmaddsub.p") || @@ -3863,7 +3863,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, if (IsSubAdd) Ops[2] = 
Builder.CreateFNeg(Ops[2]); - Rep = Builder.CreateIntrinsic(IID, {}, Ops); + Rep = Builder.CreateIntrinsic(IID, Ops); } else { int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements(); @@ -3914,7 +3914,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2), CI->getArgOperand(3)}; - Rep = Builder.CreateIntrinsic(IID, {}, Args); + Rep = Builder.CreateIntrinsic(IID, Args); Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) : CI->getArgOperand(0); Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru); @@ -3941,7 +3941,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2)}; - Rep = Builder.CreateIntrinsic(IID, {}, Args); + Rep = Builder.CreateIntrinsic(IID, Args); Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) : CI->getArgOperand(0); Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); @@ -3976,7 +3976,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2)}; - Rep = Builder.CreateIntrinsic(IID, {}, Args); + Rep = Builder.CreateIntrinsic(IID, Args); Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) : CI->getArgOperand(0); Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); @@ -4005,7 +4005,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2)}; - Rep = Builder.CreateIntrinsic(IID, {}, Args); + Rep = Builder.CreateIntrinsic(IID, Args); Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) : CI->getArgOperand(0); Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); @@ -4027,7 +4027,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F, // Make a call with 3 operands. Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2)}; - Value *NewCall = Builder.CreateIntrinsic(IID, {}, Args); + Value *NewCall = Builder.CreateIntrinsic(IID, Args); // Extract the second result and store it. Value *Data = Builder.CreateExtractValue(NewCall, 1); @@ -4095,7 +4095,7 @@ static Value *upgradeAArch64IntrinsicCall(StringRef Name, CallBase *CI, Args[1] = Builder.CreateIntrinsic( Intrinsic::aarch64_sve_convert_from_svbool, GoodPredTy, Args[1]); - return Builder.CreateIntrinsic(NewID, {}, Args, /*FMFSource=*/nullptr, + return Builder.CreateIntrinsic(NewID, Args, /*FMFSource=*/nullptr, CI->getName()); } diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 3a45802b24cc1..64e0ae58afb3a 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -28069,7 +28069,7 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder, IsAcquire ? 
Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp; Value *LoHi = - Builder.CreateIntrinsic(Int, {}, Addr, /*FMFSource=*/nullptr, "lohi"); + Builder.CreateIntrinsic(Int, Addr, /*FMFSource=*/nullptr, "lohi"); Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo"); Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi"); @@ -28099,7 +28099,7 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder, void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance( IRBuilderBase &Builder) const { - Builder.CreateIntrinsic(Intrinsic::aarch64_clrex, {}, {}); + Builder.CreateIntrinsic(Intrinsic::aarch64_clrex, {}); } Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder, diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp index a3bf8c53571f7..df429e7840725 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -1476,7 +1476,7 @@ static std::optional<Instruction *> instCombineRDFFR(InstCombiner &IC, auto *PTrue = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {II.getType()}, {AllPat}); auto *RDFFR = - IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {}, {PTrue}); + IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {PTrue}); RDFFR->takeName(&II); return IC.replaceInstUsesWith(II, RDFFR); } diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp index 6554863e08c91..19e2a6a27020d 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp @@ -73,7 +73,7 @@ static Instruction *genAMDGPUReportBlock(Module &M, IRBuilder<> &IRB, Trm = SplitBlockAndInsertIfThen(Cond, Trm, false); IRB.SetInsertPoint(Trm); - return IRB.CreateIntrinsic(Intrinsic::amdgcn_unreachable, {}, {}); + return IRB.CreateIntrinsic(Intrinsic::amdgcn_unreachable, {}); } static Value *createSlowPathCmp(Module &M, IRBuilder<> &IRB, Type *IntptrTy, diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp index 76b1775f0d096..0a163f8dc7f6b 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp @@ -666,7 +666,7 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I, // Record I's original position as the entry block. PixelEntryBB = I.getParent(); - Value *const Cond = B.CreateIntrinsic(Intrinsic::amdgcn_ps_live, {}, {}); + Value *const Cond = B.CreateIntrinsic(Intrinsic::amdgcn_ps_live, {}); Instruction *const NonHelperTerminator = SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, &DTU, nullptr); @@ -698,15 +698,14 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I, // using the mbcnt intrinsic. 
Value *Mbcnt; if (ST.isWave32()) { - Mbcnt = B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_lo, {}, - {Ballot, B.getInt32(0)}); + Mbcnt = + B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_lo, {Ballot, B.getInt32(0)}); } else { Value *const ExtractLo = B.CreateTrunc(Ballot, Int32Ty); Value *const ExtractHi = B.CreateTrunc(B.CreateLShr(Ballot, 32), Int32Ty); - Mbcnt = B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_lo, {}, + Mbcnt = B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_lo, {ExtractLo, B.getInt32(0)}); - Mbcnt = - B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_hi, {}, {ExtractHi, Mbcnt}); + Mbcnt = B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_hi, {ExtractHi, Mbcnt}); } Function *F = I.getFunction(); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp index fdba8835cbf0a..a0c28879f6df4 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp @@ -1033,7 +1033,7 @@ Value *AMDGPUCodeGenPrepareImpl::optimizeWithFDivFast( if (!HasFP32DenormalFlush && !NumIsOne) return nullptr; - return Builder.CreateIntrinsic(Intrinsic::amdgcn_fdiv_fast, {}, {Num, Den}); + return Builder.CreateIntrinsic(Intrinsic::amdgcn_fdiv_fast, {Num, Den}); } Value *AMDGPUCodeGenPrepareImpl::visitFDivElement( diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp index 09412d1b0f1cc..a4e6768b4630d 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp @@ -304,7 +304,7 @@ static bool lowerKernelArguments(Function &F, const TargetMachine &TM) { return false; CallInst *KernArgSegment = - Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, {}, + Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, nullptr, F.getName() + ".kernarg.segment"); KernArgSegment->addRetAttr(Attribute::NonNull); KernArgSegment->addRetAttr( diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp index 55497c837ee23..3c08d1edb4991 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp @@ -532,8 +532,7 @@ class AMDGPULowerModuleLDS { auto InsertAt = F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca(); IRBuilder<> Builder(&*InsertAt); - It->second = - Builder.CreateIntrinsic(Intrinsic::amdgcn_lds_kernel_id, {}, {}); + It->second = Builder.CreateIntrinsic(Intrinsic::amdgcn_lds_kernel_id, {}); } return It->second; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp index 98a70c0dbb912..94ecb6ba9a2b8 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp @@ -1068,9 +1068,9 @@ AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) { if (!IsAMDHSA) { CallInst *LocalSizeY = - Builder.CreateIntrinsic(Intrinsic::r600_read_local_size_y, {}, {}); + Builder.CreateIntrinsic(Intrinsic::r600_read_local_size_y, {}); CallInst *LocalSizeZ = - Builder.CreateIntrinsic(Intrinsic::r600_read_local_size_z, {}, {}); + Builder.CreateIntrinsic(Intrinsic::r600_read_local_size_z, {}); ST.makeLIDRangeMetadata(LocalSizeY); ST.makeLIDRangeMetadata(LocalSizeZ); @@ -1113,7 +1113,7 @@ AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) { // } hsa_kernel_dispatch_packet_t // CallInst *DispatchPtr = - Builder.CreateIntrinsic(Intrinsic::amdgcn_dispatch_ptr, {}, {}); + 
Builder.CreateIntrinsic(Intrinsic::amdgcn_dispatch_ptr, {}); DispatchPtr->addRetAttr(Attribute::NoAlias); DispatchPtr->addRetAttr(Attribute::NonNull); F.removeFnAttr("amdgpu-no-dispatch-ptr"); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp index 2a41f7cad1f00..85b25173664fd 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp @@ -787,9 +787,9 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func, DebugLoc FirstDL = getOrCreateDebugLoc(&*PrevEntryBlock->begin(), Func->getSubprogram()); IRB.SetCurrentDebugLocation(FirstDL); - Value *WIdx = IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {}, {}); - Value *WIdy = IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_y, {}, {}); - Value *WIdz = IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_z, {}, {}); + Value *WIdx = IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {}); + Value *WIdy = IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_y, {}); + Value *WIdz = IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_z, {}); Value *XYOr = IRB.CreateOr(WIdx, WIdy); Value *XYZOr = IRB.CreateOr(XYOr, WIdz); Value *WIdzCond = IRB.CreateICmpEQ(XYZOr, IRB.getInt32(0)); @@ -854,7 +854,7 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func, "Dynamic LDS size query is only supported for CO V5 and later."); // Get size from hidden dyn_lds_size argument of kernel Value *ImplicitArg = - IRB.CreateIntrinsic(Intrinsic::amdgcn_implicitarg_ptr, {}, {}); + IRB.CreateIntrinsic(Intrinsic::amdgcn_implicitarg_ptr, {}); Value *HiddenDynLDSSize = IRB.CreateInBoundsGEP( ImplicitArg->getType(), ImplicitArg, {ConstantInt::get(Int64Ty, COV5_HIDDEN_DYN_LDS_SIZE_ARG)}); @@ -870,7 +870,7 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func, // Create a call to malloc function which does device global memory allocation // with size equals to all LDS global accesses size in this kernel. Value *ReturnAddress = - IRB.CreateIntrinsic(Intrinsic::returnaddress, {}, {IRB.getInt32(0)}); + IRB.CreateIntrinsic(Intrinsic::returnaddress, {IRB.getInt32(0)}); FunctionCallee MallocFunc = M.getOrInsertFunction( StringRef("__asan_malloc_impl"), FunctionType::get(Int64Ty, {Int64Ty, Int64Ty}, false)); @@ -896,7 +896,7 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func, XYZCondPhi->addIncoming(IRB.getInt1(0), WIdBlock); XYZCondPhi->addIncoming(IRB.getInt1(1), MallocBlock); - IRB.CreateIntrinsic(Intrinsic::amdgcn_s_barrier, {}, {}); + IRB.CreateIntrinsic(Intrinsic::amdgcn_s_barrier, {}); // Load malloc pointer from Sw LDS. 
Value *LoadMallocPtr = @@ -925,7 +925,7 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func, // Cond Free Block IRB.SetInsertPoint(CondFreeBlock, CondFreeBlock->begin()); - IRB.CreateIntrinsic(Intrinsic::amdgcn_s_barrier, {}, {}); + IRB.CreateIntrinsic(Intrinsic::amdgcn_s_barrier, {}); IRB.CreateCondBr(XYZCondPhi, FreeBlock, EndBlock); // Free Block @@ -936,7 +936,7 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func, StringRef("__asan_free_impl"), FunctionType::get(IRB.getVoidTy(), {Int64Ty, Int64Ty}, false)); Value *ReturnAddr = - IRB.CreateIntrinsic(Intrinsic::returnaddress, {}, IRB.getInt32(0)); + IRB.CreateIntrinsic(Intrinsic::returnaddress, IRB.getInt32(0)); Value *RAPToInt = IRB.CreatePtrToInt(ReturnAddr, Int64Ty); Value *MallocPtrToInt = IRB.CreatePtrToInt(LoadMallocPtr, Int64Ty); IRB.CreateCall(AsanFreeFunc, {MallocPtrToInt, RAPToInt}); @@ -1070,7 +1070,7 @@ void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses( SetVector<Instruction *> LDSInstructions; getLDSMemoryInstructions(Func, LDSInstructions); - auto *KernelId = IRB.CreateIntrinsic(Intrinsic::amdgcn_lds_kernel_id, {}, {}); + auto *KernelId = IRB.CreateIntrinsic(Intrinsic::amdgcn_lds_kernel_id, {}); GlobalVariable *LDSBaseTable = NKLDSParams.LDSBaseTable; GlobalVariable *LDSOffsetTable = NKLDSParams.LDSOffsetTable; auto &OrdereLDSGlobals = NKLDSParams.OrdereLDSGlobals; diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 8657c0389cd40..ccd3bf08890dc 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -17369,8 +17369,8 @@ void SITargetLowering::emitExpandAtomicAddrSpacePredicate( Value *LoadedShared = nullptr; if (FullFlatEmulation) { - CallInst *IsShared = Builder.CreateIntrinsic( - Intrinsic::amdgcn_is_shared, {}, {Addr}, nullptr, "is.shared"); + CallInst *IsShared = Builder.CreateIntrinsic(Intrinsic::amdgcn_is_shared, + {Addr}, nullptr, "is.shared"); Builder.CreateCondBr(IsShared, SharedBB, CheckPrivateBB); Builder.SetInsertPoint(SharedBB); Value *CastToLocal = Builder.CreateAddrSpaceCast( @@ -17385,8 +17385,8 @@ void SITargetLowering::emitExpandAtomicAddrSpacePredicate( Builder.SetInsertPoint(CheckPrivateBB); } - CallInst *IsPrivate = Builder.CreateIntrinsic( - Intrinsic::amdgcn_is_private, {}, {Addr}, nullptr, "is.private"); + CallInst *IsPrivate = Builder.CreateIntrinsic(Intrinsic::amdgcn_is_private, + {Addr}, nullptr, "is.private"); Builder.CreateCondBr(IsPrivate, PrivateBB, GlobalBB); Builder.SetInsertPoint(PrivateBB); diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index f99e084d9347c..3ba03d6790185 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -21213,7 +21213,7 @@ Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder, Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0), Builder.getInt32(0), Builder.getInt32(7), Builder.getInt32(10), Builder.getInt32(5)}; - return Builder.CreateIntrinsic(Intrinsic::arm_mcr, {}, args); + return Builder.CreateIntrinsic(Intrinsic::arm_mcr, args); } else { // Instead of using barriers, atomic accesses on these subtargets use // libcalls. @@ -21223,7 +21223,7 @@ Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder, // Only a full system barrier exists in the M-class architectures. Domain = Subtarget->isMClass() ? 
ARM_MB::SY : Domain; Constant *CDomain = Builder.getInt32(Domain); - return Builder.CreateIntrinsic(Intrinsic::arm_dmb, {}, CDomain); + return Builder.CreateIntrinsic(Intrinsic::arm_dmb, CDomain); } } @@ -21478,7 +21478,7 @@ Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd; Value *LoHi = - Builder.CreateIntrinsic(Int, {}, Addr, /*FMFSource=*/nullptr, "lohi"); + Builder.CreateIntrinsic(Int, Addr, /*FMFSource=*/nullptr, "lohi"); Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo"); Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi"); @@ -21503,7 +21503,7 @@ void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance( IRBuilderBase &Builder) const { if (!Subtarget->hasV7Ops()) return; - Builder.CreateIntrinsic(Intrinsic::arm_clrex, {}, {}); + Builder.CreateIntrinsic(Intrinsic::arm_clrex, {}); } Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder, @@ -21524,7 +21524,7 @@ Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder, Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi"); if (!Subtarget->isLittle()) std::swap(Lo, Hi); - return Builder.CreateIntrinsic(Int, {}, {Lo, Hi, Addr}); + return Builder.CreateIntrinsic(Int, {Lo, Hi, Addr}); } Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex; diff --git a/llvm/lib/Target/ARM/MVETailPredication.cpp b/llvm/lib/Target/ARM/MVETailPredication.cpp index 98209f0cbe24f..bb07d79c9374a 100644 --- a/llvm/lib/Target/ARM/MVETailPredication.cpp +++ b/llvm/lib/Target/ARM/MVETailPredication.cpp @@ -399,7 +399,7 @@ void MVETailPredication::InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask, case 8: VCTPID = Intrinsic::arm_mve_vctp16; break; case 16: VCTPID = Intrinsic::arm_mve_vctp8; break; } - Value *VCTPCall = Builder.CreateIntrinsic(VCTPID, {}, Processed); + Value *VCTPCall = Builder.CreateIntrinsic(VCTPID, Processed); ActiveLaneMask->replaceAllUsesWith(VCTPCall); // Add the incoming value to the new phi. diff --git a/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp b/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp index b44519a1286d0..0f0788616860e 100644 --- a/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp +++ b/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp @@ -212,7 +212,7 @@ bool HexagonGenExtract::convert(Instruction *In) { Intrinsic::ID IntId = (BW == 32) ? 
Intrinsic::hexagon_S2_extractu : Intrinsic::hexagon_S2_extractup; Value *NewIn = - IRB.CreateIntrinsic(IntId, {}, {BF, IRB.getInt32(W), IRB.getInt32(SR)}); + IRB.CreateIntrinsic(IntId, {BF, IRB.getInt32(W), IRB.getInt32(SR)}); if (SL != 0) NewIn = IRB.CreateShl(NewIn, SL, CSL->getName()); In->replaceAllUsesWith(NewIn); diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp index 1710488e4e292..4c479ac41be12 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -3854,7 +3854,7 @@ Value *HexagonTargetLowering::emitLoadLinked(IRBuilderBase &Builder, : Intrinsic::hexagon_L4_loadd_locked; Value *Call = - Builder.CreateIntrinsic(IntID, {}, Addr, /*FMFSource=*/nullptr, "larx"); + Builder.CreateIntrinsic(IntID, Addr, /*FMFSource=*/nullptr, "larx"); return Builder.CreateBitCast(Call, ValueTy); } @@ -3876,7 +3876,7 @@ Value *HexagonTargetLowering::emitStoreConditional(IRBuilderBase &Builder, Val = Builder.CreateBitCast(Val, CastTy); - Value *Call = Builder.CreateIntrinsic(IntID, {}, {Addr, Val}, + Value *Call = Builder.CreateIntrinsic(IntID, {Addr, Val}, /*FMFSource=*/nullptr, "stcx"); Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), ""); Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext())); diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp index 0760d712f9afd..d89bc41b910d0 100644 --- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp +++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp @@ -2392,7 +2392,7 @@ auto HexagonVectorCombine::vralignb(IRBuilderBase &Builder, Value *Lo, Type *Int64Ty = Type::getInt64Ty(F.getContext()); Value *Lo64 = Builder.CreateBitCast(Lo, Int64Ty, "cst"); Value *Hi64 = Builder.CreateBitCast(Hi, Int64Ty, "cst"); - Value *Call = Builder.CreateIntrinsic(Intrinsic::hexagon_S2_valignrb, {}, + Value *Call = Builder.CreateIntrinsic(Intrinsic::hexagon_S2_valignrb, {Hi64, Lo64, Amt}, /*FMFSource=*/nullptr, "cup"); return Builder.CreateBitCast(Call, Lo->getType(), "cst"); diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index ab78f33f5a630..e2ce5e4fc17e1 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -12429,7 +12429,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N, //===----------------------------------------------------------------------===// static Instruction *callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id) { - return Builder.CreateIntrinsic(Id, {}, {}); + return Builder.CreateIntrinsic(Id, {}); } // The mappings for emitLeading/TrailingFence is taken from diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp index 68651f4ee4d2f..208e4bad3808c 100644 --- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp @@ -1193,7 +1193,7 @@ void SPIRVEmitIntrinsics::preprocessUndefs(IRBuilder<> &B) { setInsertPointSkippingPhis(B, I); BPrepared = true; } - auto *IntrUndef = B.CreateIntrinsic(Intrinsic::spv_undef, {}, {}); + auto *IntrUndef = B.CreateIntrinsic(Intrinsic::spv_undef, {}); Worklist.push(IntrUndef); I->replaceUsesOfWith(Op, IntrUndef); AggrConsts[IntrUndef] = AggrUndef; @@ -1306,7 +1306,7 @@ Instruction *SPIRVEmitIntrinsics::visitCallInst(CallInst &Call) { IRBuilder<> B(Call.getParent()); B.SetInsertPoint(&Call); - 
B.CreateIntrinsic(Intrinsic::spv_inline_asm, {}, {Args}); + B.CreateIntrinsic(Intrinsic::spv_inline_asm, {Args}); return &Call; } @@ -1808,7 +1808,7 @@ Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &I) { IRBuilder<> B(I.getParent()); B.SetInsertPoint(&I); - B.CreateIntrinsic(Intrinsic::spv_unreachable, {}, {}); + B.CreateIntrinsic(Intrinsic::spv_unreachable, {}); return &I; } diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp index f7482b4686848..d92f7816cbc8d 100644 --- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp @@ -1872,9 +1872,9 @@ void SPIRVGlobalRegistry::buildAssignType(IRBuilder<> &B, Type *Ty, SmallVector<Metadata *, 2> ArgMDs{ MDNode::get(Ctx, ValueAsMetadata::getConstant(OfType)), MDString::get(Ctx, Arg->getName())}; - B.CreateIntrinsic(Intrinsic::spv_value_md, {}, + B.CreateIntrinsic(Intrinsic::spv_value_md, {MetadataAsValue::get(Ctx, MDTuple::get(Ctx, ArgMDs))}); - AssignCI = B.CreateIntrinsic(Intrinsic::fake_use, {}, {Arg}); + AssignCI = B.CreateIntrinsic(Intrinsic::fake_use, {Arg}); } else { AssignCI = buildIntrWithMD(Intrinsic::spv_assign_type, {Arg->getType()}, OfType, Arg, {}, B); diff --git a/llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp b/llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp index d20ea85f75909..20cd1ed5f7e42 100644 --- a/llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp @@ -612,7 +612,7 @@ class SPIRVStructurizer : public FunctionPass { auto ContinueAddress = BlockAddress::get(Continue->getParent(), Continue); SmallVector<Value *, 2> Args = {MergeAddress, ContinueAddress}; - Builder.CreateIntrinsic(Intrinsic::spv_loop_merge, {}, {Args}); + Builder.CreateIntrinsic(Intrinsic::spv_loop_merge, {Args}); Modified = true; } diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp index a44c583a1ca51..c4d349044fe80 100644 --- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp +++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp @@ -1869,7 +1869,7 @@ static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0, // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI. 
if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) { Value *Args[] = {Op0, CILength, CIIndex}; - return Builder.CreateIntrinsic(Intrinsic::x86_sse4a_extrqi, {}, Args); + return Builder.CreateIntrinsic(Intrinsic::x86_sse4a_extrqi, Args); } } @@ -1966,7 +1966,7 @@ static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1, Constant *CIIndex = ConstantInt::get(IntTy8, Index, false); Value *Args[] = {Op0, Op1, CILength, CIIndex}; - return Builder.CreateIntrinsic(Intrinsic::x86_sse4a_insertqi, {}, Args); + return Builder.CreateIntrinsic(Intrinsic::x86_sse4a_insertqi, Args); } return nullptr; diff --git a/llvm/lib/Target/X86/X86LowerAMXType.cpp b/llvm/lib/Target/X86/X86LowerAMXType.cpp index eacdc8a3f639a..54f5977fe76eb 100644 --- a/llvm/lib/Target/X86/X86LowerAMXType.cpp +++ b/llvm/lib/Target/X86/X86LowerAMXType.cpp @@ -380,7 +380,7 @@ void X86LowerAMXType::combineLoadBitcast(LoadInst *LD, BitCastInst *Bitcast) { std::array<Value *, 4> Args = {Row, Col, I8Ptr, Stride}; Value *NewInst = - Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, {}, Args); + Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, Args); Bitcast->replaceAllUsesWith(NewInst); } @@ -405,7 +405,7 @@ void X86LowerAMXType::combineBitcastStore(BitCastInst *Bitcast, StoreInst *ST) { Value *Stride = Builder.getInt64(64); Value *I8Ptr = ST->getOperand(1); std::array<Value *, 5> Args = {Row, Col, I8Ptr, Stride, Tile}; - Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, {}, Args); + Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, Args); if (Bitcast->hasOneUse()) return; // %13 = bitcast x86_amx %src to <256 x i32> @@ -455,7 +455,7 @@ bool X86LowerAMXType::transformBitcast(BitCastInst *Bitcast) { std::tie(Row, Col) = SC->getShape(II, OpNo); std::array<Value *, 4> Args = {Row, Col, I8Ptr, Stride}; Value *NewInst = - Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, {}, Args); + Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, Args); Bitcast->replaceAllUsesWith(NewInst); } else { // %2 = bitcast x86_amx %src to <256 x i32> @@ -472,7 +472,7 @@ bool X86LowerAMXType::transformBitcast(BitCastInst *Bitcast) { Value *Row = II->getOperand(0); Value *Col = II->getOperand(1); std::array<Value *, 5> Args = {Row, Col, I8Ptr, Stride, Src}; - Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, {}, Args); + Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, Args); Value *NewInst = Builder.CreateLoad(Bitcast->getType(), AllocaAddr); Bitcast->replaceAllUsesWith(NewInst); } @@ -612,7 +612,7 @@ static Instruction *createTileStore(Instruction *TileDef, Value *Ptr) { std::array<Value *, 5> Args = {Row, Col, Ptr, Stride, TileDef}; Instruction *TileStore = - Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, {}, Args); + Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, Args); return TileStore; } @@ -643,7 +643,7 @@ static void replaceWithTileLoad(Use &U, Value *Ptr, bool IsPHI = false) { std::array<Value *, 4> Args = {Row, Col, Ptr, Stride}; Value *TileLoad = - Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, {}, Args); + Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, Args); UserI->replaceUsesOfWith(V, TileLoad); } @@ -1124,7 +1124,7 @@ bool X86LowerAMXCast::combineCastStore(IntrinsicInst *Cast, StoreInst *ST) { Value *Stride = Builder.CreateSExt(Col, Builder.getInt64Ty()); Value *I8Ptr = Builder.CreateBitCast(ST->getOperand(1), Builder.getPtrTy()); std::array<Value *, 5> Args = {Row, Col, I8Ptr, 
Stride, Tile}; - Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, {}, Args); + Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, Args); return true; } @@ -1169,7 +1169,7 @@ bool X86LowerAMXCast::combineLoadCast(IntrinsicInst *Cast, LoadInst *LD) { std::array<Value *, 4> Args = {Row, Col, I8Ptr, Stride}; Value *NewInst = - Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, {}, Args); + Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, Args); Cast->replaceAllUsesWith(NewInst); return EraseLoad; @@ -1357,7 +1357,7 @@ bool X86LowerAMXCast::transformAMXCast(IntrinsicInst *AMXCast) { std::array<Value *, 4> Args = { Row, Col, I8Ptr, Builder.CreateSExt(Col, Builder.getInt64Ty())}; Value *NewInst = - Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, {}, Args); + Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, Args); AMXCast->replaceAllUsesWith(NewInst); AMXCast->eraseFromParent(); } else { @@ -1376,7 +1376,7 @@ bool X86LowerAMXCast::transformAMXCast(IntrinsicInst *AMXCast) { Value *Col = II->getOperand(1); std::array<Value *, 5> Args = { Row, Col, I8Ptr, Builder.CreateSExt(Col, Builder.getInt64Ty()), Src}; - Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, {}, Args); + Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, Args); Value *NewInst = Builder.CreateLoad(AMXCast->getType(), AllocaAddr); AMXCast->replaceAllUsesWith(NewInst); AMXCast->eraseFromParent(); diff --git a/llvm/lib/Target/X86/X86WinEHState.cpp b/llvm/lib/Target/X86/X86WinEHState.cpp index 7d6d3f8d21f25..7c1c712b6fb57 100644 --- a/llvm/lib/Target/X86/X86WinEHState.cpp +++ b/llvm/lib/Target/X86/X86WinEHState.cpp @@ -374,7 +374,7 @@ void WinEHStatePass::emitExceptionRegistrationRecord(Function *F) { } Value *WinEHStatePass::emitEHLSDA(IRBuilder<> &Builder, Function *F) { - return Builder.CreateIntrinsic(Intrinsic::x86_seh_lsda, {}, F); + return Builder.CreateIntrinsic(Intrinsic::x86_seh_lsda, F); } /// Generate a thunk that puts the LSDA of ParentFunc in EAX and then calls @@ -649,13 +649,13 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) { // that it can recover the original frame pointer. IRBuilder<> Builder(RegNode->getNextNode()); Value *RegNodeI8 = Builder.CreateBitCast(RegNode, Builder.getPtrTy()); - Builder.CreateIntrinsic(Intrinsic::x86_seh_ehregnode, {}, {RegNodeI8}); + Builder.CreateIntrinsic(Intrinsic::x86_seh_ehregnode, {RegNodeI8}); if (EHGuardNode) { IRBuilder<> Builder(EHGuardNode->getNextNode()); Value *EHGuardNodeI8 = Builder.CreateBitCast(EHGuardNode, Builder.getPtrTy()); - Builder.CreateIntrinsic(Intrinsic::x86_seh_ehguard, {}, {EHGuardNodeI8}); + Builder.CreateIntrinsic(Intrinsic::x86_seh_ehguard, {EHGuardNodeI8}); } // Calculate state numbers. 
diff --git a/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp index 31528bf1f0fae..3870e80f9559b 100644 --- a/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp +++ b/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp @@ -156,7 +156,7 @@ bool XCoreLowerThreadLocal::lowerGlobal(GlobalVariable *GV) { for (User *U : Users) { Instruction *Inst = cast<Instruction>(U); IRBuilder<> Builder(Inst); - Value *ThreadID = Builder.CreateIntrinsic(Intrinsic::xcore_getid, {}, {}); + Value *ThreadID = Builder.CreateIntrinsic(Intrinsic::xcore_getid, {}); Value *Addr = Builder.CreateInBoundsGEP(NewGV->getValueType(), NewGV, {Builder.getInt64(0), ThreadID}); U->replaceUsesOfWith(GV, Addr); diff --git a/llvm/lib/Transforms/IPO/CrossDSOCFI.cpp b/llvm/lib/Transforms/IPO/CrossDSOCFI.cpp index 2d884078940cc..a848eac6f3e4c 100644 --- a/llvm/lib/Transforms/IPO/CrossDSOCFI.cpp +++ b/llvm/lib/Transforms/IPO/CrossDSOCFI.cpp @@ -126,7 +126,7 @@ void CrossDSOCFI::buildCFICheck(Module &M) { IRBuilder<> IRBTest(TestBB); Value *Test = IRBTest.CreateIntrinsic( - Intrinsic::type_test, {}, + Intrinsic::type_test, {&Addr, MetadataAsValue::get(Ctx, ConstantAsMetadata::get(CaseTypeId))}); BranchInst *BI = IRBTest.CreateCondBr(Test, ExitBB, TrapBB); diff --git a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp index 14c331b3b748e..9239ae8741afb 100644 --- a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp +++ b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp @@ -111,10 +111,10 @@ static Value *getBoundsCheckCond(Value *Ptr, Value *InstVal, static CallInst *InsertTrap(BuilderTy &IRB, bool DebugTrapBB, std::optional<int8_t> GuardKind) { if (!DebugTrapBB) - return IRB.CreateIntrinsic(Intrinsic::trap, {}, {}); + return IRB.CreateIntrinsic(Intrinsic::trap, {}); return IRB.CreateIntrinsic( - Intrinsic::ubsantrap, {}, + Intrinsic::ubsantrap, ConstantInt::get(IRB.getInt8Ty(), GuardKind.has_value() ? GuardKind.value() diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp index 65bb9c33e1772..61dfc6411fc3a 100644 --- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp @@ -1007,14 +1007,13 @@ void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite, UseShortGranules ? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow : Intrinsic::hwasan_check_memaccess_fixedshadow, - {}, {Ptr, ConstantInt::get(Int32Ty, AccessInfo), ConstantInt::get(Int64Ty, Mapping.offset())}); } else { IRB.CreateIntrinsic( UseShortGranules ? 
Intrinsic::hwasan_check_memaccess_shortgranules : Intrinsic::hwasan_check_memaccess, - {}, {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)}); + {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)}); } } diff --git a/llvm/lib/Transforms/Instrumentation/KCFI.cpp b/llvm/lib/Transforms/Instrumentation/KCFI.cpp index 38fc99429122d..bfed678854943 100644 --- a/llvm/lib/Transforms/Instrumentation/KCFI.cpp +++ b/llvm/lib/Transforms/Instrumentation/KCFI.cpp @@ -109,7 +109,7 @@ PreservedAnalyses KCFIPass::run(Function &F, FunctionAnalysisManager &AM) { Instruction *ThenTerm = SplitBlockAndInsertIfThen(Test, Call, false, VeryUnlikelyWeights); Builder.SetInsertPoint(ThenTerm); - Builder.CreateIntrinsic(Intrinsic::debugtrap, {}, {}); + Builder.CreateIntrinsic(Intrinsic::debugtrap, {}); ++NumKCFIChecks; } diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp index e330c7c89b0c5..726fcbdead70f 100644 --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -1208,7 +1208,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { MS.initializeCallbacks(*F.getParent(), TLI); FnPrologueEnd = IRBuilder<>(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHIIt()) - .CreateIntrinsic(Intrinsic::donothing, {}, {}); + .CreateIntrinsic(Intrinsic::donothing, {}); if (MS.CompileKernel) { IRBuilder<> IRB(FnPrologueEnd); @@ -3539,7 +3539,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } Value *S = IRB.CreateIntrinsic(getSignedPackIntrinsic(I.getIntrinsicID()), - {}, {S1_ext, S2_ext}, /*FMFSource=*/nullptr, + {S1_ext, S2_ext}, /*FMFSource=*/nullptr, "_msprop_vector_pack"); if (MMXEltSizeInBits) S = IRB.CreateBitCast(S, getShadowTy(&I)); diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp index 1fca354e3825f..7c73c16db02c8 100644 --- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp +++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp @@ -956,7 +956,7 @@ void FunctionInstrumenter::instrument() { // llvm.instrprof.cover(i8* <name>, i64 <hash>, i32 <num-counters>, // i32 <index>) Builder.CreateIntrinsic( - Intrinsic::instrprof_cover, {}, + Intrinsic::instrprof_cover, {NormalizedNamePtr, CFGHash, Builder.getInt32(1), Builder.getInt32(0)}); return; } @@ -1013,7 +1013,7 @@ void FunctionInstrumenter::instrument() { IRBuilder<> Builder(&EntryBB, EntryBB.getFirstInsertionPt()); // llvm.instrprof.timestamp(i8* <name>, i64 <hash>, i32 <num-counters>, // i32 <index>) - Builder.CreateIntrinsic(Intrinsic::instrprof_timestamp, {}, + Builder.CreateIntrinsic(Intrinsic::instrprof_timestamp, {NormalizedNamePtr, CFGHash, Builder.getInt32(NumCounters), Builder.getInt32(I)}); @@ -1028,7 +1028,6 @@ void FunctionInstrumenter::instrument() { // i32 <index>) Builder.CreateIntrinsic(PGOBlockCoverage ? 
Intrinsic::instrprof_cover : Intrinsic::instrprof_increment, - {}, {NormalizedNamePtr, CFGHash, Builder.getInt32(NumCounters), Builder.getInt32(I++)}); @@ -1772,7 +1771,7 @@ void SelectInstVisitor::instrumentOneSelectInst(SelectInst &SI) { auto *NormalizedFuncNameVarPtr = ConstantExpr::getPointerBitCastOrAddrSpaceCast( FuncNameVar, PointerType::get(M->getContext(), 0)); - Builder.CreateIntrinsic(Intrinsic::instrprof_increment_step, {}, + Builder.CreateIntrinsic(Intrinsic::instrprof_increment_step, {NormalizedFuncNameVarPtr, Builder.getInt64(FuncHash), Builder.getInt32(TotalNumCtrs), Builder.getInt32(*CurCtrIdx), Step}); diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp index 2b403b695c1d2..1811d145f9907 100644 --- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -573,7 +573,7 @@ bool ThreadSanitizer::sanitizeFunction(Function &F, InstrumentationIRBuilder IRB(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHIIt()); Value *ReturnAddress = - IRB.CreateIntrinsic(Intrinsic::returnaddress, {}, IRB.getInt32(0)); + IRB.CreateIntrinsic(Intrinsic::returnaddress, IRB.getInt32(0)); IRB.CreateCall(TsanFuncEntry, ReturnAddress); EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions); diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp index 86be20c799a68..4e444d8d4cefc 100644 --- a/llvm/lib/Transforms/Scalar/SROA.cpp +++ b/llvm/lib/Transforms/Scalar/SROA.cpp @@ -3876,7 +3876,7 @@ class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> { for (Instruction *I : FakeUses) { IRB.SetInsertPoint(I); for (auto *V : Components) - IRB.CreateIntrinsic(Intrinsic::fake_use, {}, {V}); + IRB.CreateIntrinsic(Intrinsic::fake_use, {V}); I->eraseFromParent(); } } diff --git a/llvm/lib/Transforms/Utils/GuardUtils.cpp b/llvm/lib/Transforms/Utils/GuardUtils.cpp index dfcfddaca145f..46ad951d0a812 100644 --- a/llvm/lib/Transforms/Utils/GuardUtils.cpp +++ b/llvm/lib/Transforms/Utils/GuardUtils.cpp @@ -71,7 +71,7 @@ void llvm::makeGuardControlFlowExplicit(Function *DeoptIntrinsic, // guard's condition. IRBuilder<> B(CheckBI); auto *WC = B.CreateIntrinsic(Intrinsic::experimental_widenable_condition, - {}, {}, nullptr, "widenable_cond"); + {}, nullptr, "widenable_cond"); CheckBI->setCondition(B.CreateAnd(CheckBI->getCondition(), WC, "exiplicit_guard_cond")); assert(isWidenableBranch(CheckBI) && "Branch must be widenable."); diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp index 1404867fda6bc..131fbe654c11c 100644 --- a/llvm/lib/Transforms/Utils/InlineFunction.cpp +++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp @@ -2159,7 +2159,7 @@ inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, // call. if (IsUnsafeClaimRV) { Builder.SetInsertPoint(II); - Builder.CreateIntrinsic(Intrinsic::objc_release, {}, RetOpnd); + Builder.CreateIntrinsic(Intrinsic::objc_release, RetOpnd); } II->eraseFromParent(); InsertRetainCall = false; @@ -2193,7 +2193,7 @@ inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, // matching autoreleaseRV or an annotated call in the callee. Emit a call // to objc_retain. 
Builder.SetInsertPoint(RI); - Builder.CreateIntrinsic(Intrinsic::objc_retain, {}, RetOpnd); + Builder.CreateIntrinsic(Intrinsic::objc_retain, RetOpnd); } } } diff --git a/llvm/unittests/IR/IRBuilderTest.cpp b/llvm/unittests/IR/IRBuilderTest.cpp index e9e9d7b11a36c..b7eb0af728331 100644 --- a/llvm/unittests/IR/IRBuilderTest.cpp +++ b/llvm/unittests/IR/IRBuilderTest.cpp @@ -81,7 +81,12 @@ TEST_F(IRBuilderTest, Intrinsics) { II = cast<IntrinsicInst>(Result); EXPECT_EQ(II->getIntrinsicID(), Intrinsic::maximum); - Result = Builder.CreateIntrinsic(Intrinsic::readcyclecounter, {}, {}); + Result = Builder.CreateIntrinsic(Intrinsic::readcyclecounter, + ArrayRef<Type *>{}, {}); + II = cast<IntrinsicInst>(Result); + EXPECT_EQ(II->getIntrinsicID(), Intrinsic::readcyclecounter); + + Result = Builder.CreateIntrinsic(Intrinsic::readcyclecounter, {}); II = cast<IntrinsicInst>(Result); EXPECT_EQ(II->getIntrinsicID(), Intrinsic::readcyclecounter); @@ -134,7 +139,7 @@ TEST_F(IRBuilderTest, Intrinsics) { EXPECT_FALSE(II->hasNoNaNs()); Result = Builder.CreateIntrinsic( - Intrinsic::set_rounding, {}, + Intrinsic::set_rounding, {Builder.getInt32(static_cast<uint32_t>(RoundingMode::TowardZero))}); II = cast<IntrinsicInst>(Result); EXPECT_EQ(II->getIntrinsicID(), Intrinsic::set_rounding); @@ -174,17 +179,17 @@ TEST_F(IRBuilderTest, IntrinsicsWithScalableVectors) { Type *DstVecTy = VectorType::get(Builder.getInt32Ty(), 4, true); Type *PredTy = VectorType::get(Builder.getInt1Ty(), 4, true); - SmallVector<Value*, 3> ArgTys; - ArgTys.push_back(UndefValue::get(DstVecTy)); - ArgTys.push_back(UndefValue::get(PredTy)); - ArgTys.push_back(UndefValue::get(SrcVecTy)); + SmallVector<Value *, 3> Args; + Args.push_back(UndefValue::get(DstVecTy)); + Args.push_back(UndefValue::get(PredTy)); + Args.push_back(UndefValue::get(SrcVecTy)); - Call = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_fcvtzs_i32f16, {}, - ArgTys, nullptr, "aarch64.sve.fcvtzs.i32f16"); + Call = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_fcvtzs_i32f16, Args, + nullptr, "aarch64.sve.fcvtzs.i32f16"); FTy = Call->getFunctionType(); EXPECT_EQ(FTy->getReturnType(), DstVecTy); - for (unsigned i = 0; i != ArgTys.size(); ++i) - EXPECT_EQ(FTy->getParamType(i), ArgTys[i]->getType()); + for (unsigned i = 0; i != Args.size(); ++i) + EXPECT_EQ(FTy->getParamType(i), Args[i]->getType()); // Test scalable flag isn't dropped for intrinsic defined with // LLVMScalarOrSameVectorWidth. 
@@ -193,19 +198,18 @@ TEST_F(IRBuilderTest, IntrinsicsWithScalableVectors) { Type *PtrToVecTy = Builder.getPtrTy(); PredTy = VectorType::get(Builder.getInt1Ty(), 4, true); - ArgTys.clear(); - ArgTys.push_back(UndefValue::get(PtrToVecTy)); - ArgTys.push_back(UndefValue::get(Builder.getInt32Ty())); - ArgTys.push_back(UndefValue::get(PredTy)); - ArgTys.push_back(UndefValue::get(VecTy)); + Args.clear(); + Args.push_back(UndefValue::get(PtrToVecTy)); + Args.push_back(UndefValue::get(Builder.getInt32Ty())); + Args.push_back(UndefValue::get(PredTy)); + Args.push_back(UndefValue::get(VecTy)); - Call = Builder.CreateIntrinsic(Intrinsic::masked_load, - {VecTy, PtrToVecTy}, ArgTys, - nullptr, "masked.load"); + Call = Builder.CreateIntrinsic(Intrinsic::masked_load, {VecTy, PtrToVecTy}, + Args, nullptr, "masked.load"); FTy = Call->getFunctionType(); EXPECT_EQ(FTy->getReturnType(), VecTy); - for (unsigned i = 0; i != ArgTys.size(); ++i) - EXPECT_EQ(FTy->getParamType(i), ArgTys[i]->getType()); + for (unsigned i = 0; i != Args.size(); ++i) + EXPECT_EQ(FTy->getParamType(i), Args[i]->getType()); } TEST_F(IRBuilderTest, CreateVScale) { diff --git a/llvm/unittests/Transforms/Utils/LocalTest.cpp b/llvm/unittests/Transforms/Utils/LocalTest.cpp index 3b3c45d969971..8d8f991e9ea49 100644 --- a/llvm/unittests/Transforms/Utils/LocalTest.cpp +++ b/llvm/unittests/Transforms/Utils/LocalTest.cpp @@ -1243,8 +1243,8 @@ TEST(Local, CanReplaceOperandWithVariable) { // immarg. Type *PtrPtr = B.getPtrTy(0); Value *Alloca = B.CreateAlloca(PtrPtr, (unsigned)0); - CallInst *GCRoot = B.CreateIntrinsic(Intrinsic::gcroot, {}, - {Alloca, Constant::getNullValue(PtrPtr)}); + CallInst *GCRoot = B.CreateIntrinsic( + Intrinsic::gcroot, {Alloca, Constant::getNullValue(PtrPtr)}); EXPECT_TRUE(canReplaceOperandWithVariable(GCRoot, 0)); // Alloca EXPECT_FALSE(canReplaceOperandWithVariable(GCRoot, 1)); EXPECT_FALSE(canReplaceOperandWithVariable(GCRoot, 2));
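A closing note on the `FMFSource` parameter kept by the new overload: per the IRBuilder.h doc comment above, fast-math flags are copied from the source instruction when one is given. A hypothetical sketch in the spirit of the AMDGPUCodeGenPrepare change (the `FDiv`, `Num`, and `Den` names are illustrative, not from the patch; `amdgcn.fdiv.fast` is non-overloaded):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/IntrinsicsAMDGPU.h"

  using namespace llvm;

  // Emit the non-overloaded amdgcn.fdiv.fast intrinsic, copying
  // fast-math flags from the original fdiv instruction.
  Value *emitFDivFast(IRBuilderBase &B, Instruction *FDiv, Value *Num,
                      Value *Den) {
    return B.CreateIntrinsic(Intrinsic::amdgcn_fdiv_fast, {Num, Den},
                             /*FMFSource=*/FDiv, "fdiv.fast");
  }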