llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-llvm-analysis

Author: Paschalis Mpeis (paschalis-mpeis)

<details>
<summary>Changes</summary>

Teach LAA to treat as safe specific math library calls that are known to have the memory write-only attribute set. That attribute is set on calls by `inferNonMandatoryLibFuncAttrs` in BuildLibCalls.cpp; the functions currently covered are `modf`/`modff` and `frexp`/`frexpf`. This happens only when the calls are found, through TLI, to have vectorized counterparts.
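For readers skimming the summary, here is a minimal standalone sketch of the check this patch introduces; it condenses the `isMathLibCallMemWriteOnly` helper from the full diff below. The function name `isWriteOnlyMathLibCall` and the explicit check of `getLibFunc`'s return value are illustrative additions, not part of the patch itself.

```cpp
// Illustrative sketch (not part of the patch): recognize the math library
// calls that inferNonMandatoryLibFuncAttrs marks as write-only to memory.
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper mirroring the patch's isMathLibCallMemWriteOnly.
static bool isWriteOnlyMathLibCall(const TargetLibraryInfo &TLI,
                                   const Instruction &I) {
  const auto *Call = dyn_cast<CallInst>(&I);
  if (!Call)
    return false;

  // Map the call to a known library function, if any.
  LibFunc Func;
  if (!TLI.getLibFunc(*Call, Func))
    return false;

  // modf/modff and frexp/frexpf only write memory through their pointer
  // argument, so the patch lets LAA keep analyzing the loop instead of
  // bailing out on an unknown memory write.
  return Func == LibFunc_modf || Func == LibFunc_modff ||
         Func == LibFunc_frexp || Func == LibFunc_frexpf;
}
```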
---

Full diff: https://github.com/llvm/llvm-project/pull/78432.diff

2 Files Affected:

- (added) clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c (+54)
- (modified) llvm/lib/Analysis/LoopAccessAnalysis.cpp (+19)


``````````diff
diff --git a/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
new file mode 100644
index 000000000000000..957b3f5cb235d31
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-veclib-function-calls-linear-ptrs.c
@@ -0,0 +1,54 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --filter "call.*(frexp|modf)" --version 4
+// RUN: %clang --target=aarch64-linux-gnu -march=armv8-a+sve -O3 -mllvm -vector-library=ArmPL -mllvm -force-vector-interleave=1 -mllvm -prefer-predicate-over-epilogue=predicate-dont-vectorize -emit-llvm -S -o - %s | FileCheck %s
+
+// REQUIRES: aarch64-registered-target
+
+/*
+Testing vectorization of math functions that have the attribute write-only to
+memory set. Given they have vectorized counterparts, they should be able to
+vectorize.
+*/
+
+// The following define is required to access some math functions.
+#define _GNU_SOURCE
+#include <math.h>
+
+// frexp/frexpf have no TLI mappings yet.
+
+// CHECK-LABEL: define dso_local void @frexp_f64(
+// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// CHECK: [[CALL:%.*]] = tail call double @frexp(double noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR5:[0-9]+]]
+//
+void frexp_f64(double *in, double *out1, int *out2, int N) {
+  for (int i = 0; i < N; ++i)
+    *out1 = frexp(in[i], out2+i);
+}
+
+// CHECK-LABEL: define dso_local void @frexp_f32(
+// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK: [[CALL:%.*]] = tail call float @frexpf(float noundef [[TMP0:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR5]]
+//
+void frexp_f32(float *in, float *out1, int *out2, int N) {
+  for (int i = 0; i < N; ++i)
+    *out1 = frexpf(in[i], out2+i);
+}
+
+// CHECK-LABEL: define dso_local void @modf_f64(
+// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK: [[TMP11:%.*]] = tail call <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP10:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+// CHECK: [[CALL:%.*]] = tail call double @modf(double noundef [[TMP14:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR6:[0-9]+]]
+//
+void modf_f64(double *in, double *out1, double *out2, int N) {
+  for (int i = 0; i < N; ++i)
+    out1[i] = modf(in[i], out2+i);
+}
+
+// CHECK-LABEL: define dso_local void @modf_f32(
+// CHECK-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef writeonly [[OUT1:%.*]], ptr nocapture noundef writeonly [[OUT2:%.*]], i32 noundef [[N:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK: [[TMP11:%.*]] = tail call <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP10:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+// CHECK: [[CALL:%.*]] = tail call float @modff(float noundef [[TMP14:%.*]], ptr noundef [[ADD_PTR:%.*]]) #[[ATTR7:[0-9]+]]
+//
+void modf_f32(float *in, float *out1, float *out2, int N) {
+  for (int i = 0; i < N; ++i)
+    out1[i] = modff(in[i], out2+i);
+}
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index aed60cc5a3f5ef0..0c8b4e51fcf5c16 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2274,6 +2274,20 @@ bool LoopAccessInfo::canAnalyzeLoop() {
   return true;
 }
 
+/// Returns whether \p I is a known math library call that has the memory
+/// write-only attribute set.
+static bool isMathLibCallMemWriteOnly(const TargetLibraryInfo *TLI,
+                                      const Instruction &I) {
+  auto *Call = dyn_cast<CallInst>(&I);
+  if (!Call)
+    return false;
+
+  LibFunc Func;
+  TLI->getLibFunc(*Call, Func);
+  return Func == LibFunc::LibFunc_modf || Func == LibFunc::LibFunc_modff ||
+         Func == LibFunc::LibFunc_frexp || Func == LibFunc::LibFunc_frexpf;
+}
+
 void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
                                  const TargetLibraryInfo *TLI,
                                  DominatorTree *DT) {
@@ -2364,6 +2378,11 @@ void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
 
       // Save 'store' instructions. Abort if other instructions write to memory.
       if (I.mayWriteToMemory()) {
+        // We can safely handle math functions that have vectorized
+        // counterparts and have the memory write-only attribute set.
+        if (isMathLibCallMemWriteOnly(TLI, I))
+          continue;
+
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
``````````

</details>

https://github.com/llvm/llvm-project/pull/78432

_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits