Author: Arthur Eubanks
Date: 2021-01-22T12:29:39-08:00
New Revision: 42d682a217b6e04318d11d374e29d7d94ceaed1f

URL: https://github.com/llvm/llvm-project/commit/42d682a217b6e04318d11d374e29d7d94ceaed1f
DIFF: https://github.com/llvm/llvm-project/commit/42d682a217b6e04318d11d374e29d7d94ceaed1f.diff

LOG: [NewPM][AMDGPU] Skip adding CGSCCOptimizerLate callbacks at O0

The legacy PM's EP_CGSCCOptimizerLate was only used under not-O0.

Fixes clang/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp under the new PM.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D95250

Added: 


Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp

Removed: 


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 38e3966cc150..ce7c82e2a88a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -579,24 +579,27 @@ void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB,
   PB.registerCGSCCOptimizerLateEPCallback(
       [this, DebugPassManager](CGSCCPassManager &PM,
                                PassBuilder::OptimizationLevel Level) {
-        FunctionPassManager FPM(DebugPassManager);
+        if (Level == PassBuilder::OptimizationLevel::O0)
+          return;
 
-        // Add infer address spaces pass to the opt pipeline after inlining
-        // but before SROA to increase SROA opportunities.
-        FPM.addPass(InferAddressSpacesPass());
+        FunctionPassManager FPM(DebugPassManager);
 
-        // This should run after inlining to have any chance of doing
-        // anything, and before other cleanup optimizations.
-        FPM.addPass(AMDGPULowerKernelAttributesPass());
+        // Add infer address spaces pass to the opt pipeline after inlining
+        // but before SROA to increase SROA opportunities.
+        FPM.addPass(InferAddressSpacesPass());
 
-        if (Level != PassBuilder::OptimizationLevel::O0) {
-          // Promote alloca to vector before SROA and loop unroll. If we
-          // manage to eliminate allocas before unroll we may choose to unroll
-          // less.
-          FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
-        }
+        // This should run after inlining to have any chance of doing
+        // anything, and before other cleanup optimizations.
+        FPM.addPass(AMDGPULowerKernelAttributesPass());
+
+        if (Level != PassBuilder::OptimizationLevel::O0) {
+          // Promote alloca to vector before SROA and loop unroll. If we
+          // manage to eliminate allocas before unroll we may choose to unroll
+          // less.
+          FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
+        }
 
-        PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
+        PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
       });
 }
 

_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
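
For context, a minimal sketch (not part of the commit) of the legacy-PM counterpart the log message refers to: with the legacy PassManagerBuilder, extensions registered at EP_CGSCCOptimizerLate are only run by the optimizing (non-O0) pipeline, which is the behavior the early return added above mirrors in the new PM. The helper name registerLegacyCGSCCLateExtension is hypothetical, and InferAddressSpaces stands in for whatever passes a target would add at this extension point.

// Sketch, assuming the legacy PassManagerBuilder API as of early 2021.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

// Hypothetical helper a target could call when adjusting the legacy pipeline.
static void registerLegacyCGSCCLateExtension(PassManagerBuilder &Builder) {
  Builder.addExtension(
      PassManagerBuilder::EP_CGSCCOptimizerLate,
      [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
        // The legacy PM only invokes EP_CGSCCOptimizerLate extensions in its
        // optimizing pipeline, so nothing added here runs at -O0.
        PM.add(createInferAddressSpacesPass());
      });
}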