https://github.com/thurstond created https://github.com/llvm/llvm-project/pull/125037

This reverts commit 928cad49beec0120686478f502899222e836b545, i.e., it relands
dccd27112722109d2e2f03e8da9ce8690f06e11b, with a fix to avoid a use-after-scope
by changing the lambda to capture by value.
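
For readers skimming the patch, here is a minimal, self-contained sketch of the
failure mode and the fix. The helper names below are hypothetical, not Clang's
actual API; the real code registers its callback via
PB.registerOptimizerEarlyEPCallback in BackendUtil.cpp.

  // Sketch only: a deferred callback that outlives the registering function.
  #include <functional>
  #include <iostream>
  #include <optional>
  #include <vector>

  static std::vector<std::function<void()>> Callbacks;

  // Stand-in for registering an optimizer callback that runs much later.
  static void registerCallback(std::function<void()> CB) {
    Callbacks.push_back(std::move(CB));
  }

  static void registerScaledCutoffs() {
    std::optional<std::vector<unsigned>> ScaledCutoffs =
        std::vector<unsigned>{0, 990000};

    // BUG (the reverted commit): [&] captures ScaledCutoffs by reference, but
    // ScaledCutoffs dies when this function returns, so the deferred call
    // reads a dead object (use-after-scope).
    // registerCallback([&] { std::cout << ScaledCutoffs->size() << "\n"; });

    // FIX (this reland): capture by value, so the closure owns its own copy.
    registerCallback(
        [ScaledCutoffs] { std::cout << ScaledCutoffs->size() << "\n"; });
  }

  int main() {
    registerScaledCutoffs();
    for (auto &CB : Callbacks) // runs after ScaledCutoffs went out of scope
      CB();
  }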

>From a5491f8a078f912c6a2de9b33f1915ca88e679d2 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurs...@google.com>
Date: Thu, 30 Jan 2025 07:04:13 +0000
Subject: [PATCH 1/2] Reapply "[ubsan] Connect -fsanitize-skip-hot-cutoff to
 LowerAllowCheckPass<cutoffs>" (#125032)

This reverts commit 928cad49beec0120686478f502899222e836b545.
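
For context, the flag's per-check cutoffs are fractions in [0, 1]
(e.g. -fsanitize-skip-hot-cutoff=signed-integer-overflow=0.000001), while
LowerAllowCheckPass consumes integer cutoffs in [0, 1000000]. Below is a rough,
standalone sketch of the scaling done by the new
SanitizerMaskCutoffs::getAllScaled in the patch; it is simplified, and the real
version iterates the SanitizerKind ordinals instead of taking a vector.

  #include <algorithm>
  #include <cmath>
  #include <iostream>
  #include <optional>
  #include <vector>

  // Clamp each set cutoff to [0, 1] and scale it to an integer; unset cutoffs
  // become 0. Returns nullopt when no cutoff is set at all.
  static std::optional<std::vector<unsigned>>
  getAllScaledSketch(const std::vector<std::optional<double>> &Cutoffs,
                     unsigned ScalingFactor) {
    std::vector<unsigned> Scaled;
    bool AnyCutoff = false;
    for (const auto &C : Cutoffs) {
      if (C.has_value()) {
        Scaled.push_back(std::lround(std::clamp(*C, 0.0, 1.0) * ScalingFactor));
        AnyCutoff = true;
      } else {
        Scaled.push_back(0);
      }
    }
    if (AnyCutoff)
      return Scaled;
    return std::nullopt;
  }

  int main() {
    // One unset cutoff and one set to 0.000001, scaled by 1000000.
    auto Scaled = getAllScaledSketch({std::nullopt, 0.000001}, 1000000);
    if (Scaled)
      for (unsigned V : *Scaled)
        std::cout << V << "\n"; // prints 0, then 1
  }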
---
 clang/include/clang/Basic/Sanitizers.h        |   4 +
 clang/lib/Basic/Sanitizers.cpp                |  22 +++
 clang/lib/CodeGen/BackendUtil.cpp             |  10 +-
 clang/lib/CodeGen/CGExpr.cpp                  |  28 ++--
 clang/test/CodeGen/allow-ubsan-check-inline.c |   5 +
 clang/test/CodeGen/allow-ubsan-check.c        | 143 +++++++++++-------
 6 files changed, 141 insertions(+), 71 deletions(-)

diff --git a/clang/include/clang/Basic/Sanitizers.h b/clang/include/clang/Basic/Sanitizers.h
index fc0576d452b17f..1aa7476f9d8e0a 100644
--- a/clang/include/clang/Basic/Sanitizers.h
+++ b/clang/include/clang/Basic/Sanitizers.h
@@ -162,6 +162,10 @@ class SanitizerMaskCutoffs {
 
   void set(SanitizerMask K, double V);
   void clear(SanitizerMask K = SanitizerKind::All);
+
+  // Returns nullopt if all the values are zero.
+  // Otherwise, return value contains a vector of all the scaled values.
+  std::optional<std::vector<unsigned>> getAllScaled(unsigned ScalingFactor) const;
 };
 
 struct SanitizerSet {
diff --git a/clang/lib/Basic/Sanitizers.cpp b/clang/lib/Basic/Sanitizers.cpp
index 5b9b88d032702f..e0eff2a37b8a0d 100644
--- a/clang/lib/Basic/Sanitizers.cpp
+++ b/clang/lib/Basic/Sanitizers.cpp
@@ -18,6 +18,7 @@
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
 #include <algorithm>
+#include <cmath>
 #include <optional>
 
 using namespace clang;
@@ -43,6 +44,27 @@ std::optional<double> SanitizerMaskCutoffs::operator[](unsigned Kind) const {
 
 void SanitizerMaskCutoffs::clear(SanitizerMask K) { set(K, 0); }
 
+std::optional<std::vector<unsigned>>
+SanitizerMaskCutoffs::getAllScaled(unsigned ScalingFactor) const {
+  std::vector<unsigned> ScaledCutoffs;
+
+  bool AnyCutoff = false;
+  for (unsigned int i = 0; i < SanitizerKind::SO_Count; ++i) {
+    auto C = (*this)[i];
+    if (C.has_value()) {
+      ScaledCutoffs.push_back(lround(std::clamp(*C, 0.0, 1.0) * ScalingFactor));
+      AnyCutoff = true;
+    } else {
+      ScaledCutoffs.push_back(0);
+    }
+  }
+
+  if (AnyCutoff)
+    return ScaledCutoffs;
+
+  return std::nullopt;
+}
+
 // Once LLVM switches to C++17, the constexpr variables can be inline and we
 // won't need this.
 #define SANITIZER(NAME, ID) constexpr SanitizerMask SanitizerKind::ID;
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 3e65eeb3755d2f..bc32878d9669ec 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -795,12 +795,20 @@ static void addSanitizers(const Triple &TargetTriple,
     PB.registerOptimizerLastEPCallback(SanitizersCallback);
   }
 
-  if (LowerAllowCheckPass::IsRequested()) {
+  // SanitizeSkipHotCutoffs: doubles with range [0, 1]
+  // Opts.cutoffs: unsigned ints with range [0, 1000000]
+  auto ScaledCutoffs = CodeGenOpts.SanitizeSkipHotCutoffs.getAllScaled(1000000);
+
+  // TODO: remove IsRequested()
+  if (LowerAllowCheckPass::IsRequested() || ScaledCutoffs.has_value()) {
     // We want to call it after inline, which is about OptimizerEarlyEPCallback.
     PB.registerOptimizerEarlyEPCallback([&](ModulePassManager &MPM,
                                             OptimizationLevel Level,
                                             ThinOrFullLTOPhase Phase) {
       LowerAllowCheckPass::Options Opts;
+      // TODO: after removing IsRequested(), make this unconditional
+      if (ScaledCutoffs.has_value())
+        Opts.cutoffs = ScaledCutoffs.value();
       
MPM.addPass(createModuleToFunctionPassAdaptor(LowerAllowCheckPass(Opts)));
     });
   }
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 9676e61cf322d9..bf8df2789f58db 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -3614,29 +3614,33 @@ void CodeGenFunction::EmitCheck(
   llvm::Value *RecoverableCond = nullptr;
   llvm::Value *TrapCond = nullptr;
   bool NoMerge = false;
+  // Expand checks into:
+  //   (Check1 || !allow_ubsan_check) && (Check2 || !allow_ubsan_check) ...
+  // We need separate allow_ubsan_check intrinsics because they have separately
+  // specified cutoffs.
+  // This expression looks expensive but will be simplified after
+  // LowerAllowCheckPass.
   for (auto &[Check, Ord] : Checked) {
+    llvm::Value *GuardedCheck = Check;
+    if (ClSanitizeGuardChecks ||
+        (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] > 0)) {
+      llvm::Value *Allow = Builder.CreateCall(
+          CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
+          llvm::ConstantInt::get(CGM.Int8Ty, Ord));
+      GuardedCheck = Builder.CreateOr(Check, Builder.CreateNot(Allow));
+    }
+
     // -fsanitize-trap= overrides -fsanitize-recover=.
     llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
                          : CGM.getCodeGenOpts().SanitizeRecover.has(Ord)
                              ? RecoverableCond
                              : FatalCond;
-    Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
+    Cond = Cond ? Builder.CreateAnd(Cond, GuardedCheck) : GuardedCheck;
 
     if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Ord))
       NoMerge = true;
   }
 
-  if (ClSanitizeGuardChecks) {
-    llvm::Value *Allow =
-        Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
-                           llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));
-
-    for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
-      if (*Cond)
-        *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
-    }
-  }
-
   if (TrapCond)
     EmitTrapCheck(TrapCond, CheckHandler, NoMerge);
   if (!FatalCond && !RecoverableCond)
diff --git a/clang/test/CodeGen/allow-ubsan-check-inline.c b/clang/test/CodeGen/allow-ubsan-check-inline.c
index 1de24ab90dac0e..eed48cf15ecca4 100644
--- a/clang/test/CodeGen/allow-ubsan-check-inline.c
+++ b/clang/test/CodeGen/allow-ubsan-check-inline.c
@@ -1,3 +1,8 @@
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s -fsanitize=signed-integer-overflow -fsanitize-skip-hot-cutoff=signed-integer-overflow=0.000001 -O3 -mllvm -lower-allow-check-random-rate=1 -Rpass=lower-allow-check -Rpass-missed=lower-allow-check -fno-inline 2>&1 | FileCheck %s --check-prefixes=NOINL --implicit-check-not="remark:"
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s -fsanitize=signed-integer-overflow -fsanitize-skip-hot-cutoff=signed-integer-overflow=0.000001 -O3 -mllvm -lower-allow-check-random-rate=1 -Rpass=lower-allow-check -Rpass-missed=lower-allow-check 2>&1 | FileCheck %s --check-prefixes=INLINE --implicit-check-not="remark:"
+//
+// -ubsan-guard-checks is deprecated and will be removed in the future;
+// use -fsanitize-skip-hot-cutoff, as shown above.
 // RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s -fsanitize=signed-integer-overflow -mllvm -ubsan-guard-checks -O3 -mllvm -lower-allow-check-random-rate=1 -Rpass=lower-allow-check -Rpass-missed=lower-allow-check -fno-inline 2>&1 | FileCheck %s --check-prefixes=NOINL --implicit-check-not="remark:"
 // RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s -fsanitize=signed-integer-overflow -mllvm -ubsan-guard-checks -O3 -mllvm -lower-allow-check-random-rate=1 -Rpass=lower-allow-check -Rpass-missed=lower-allow-check 2>&1 | FileCheck %s --check-prefixes=INLINE --implicit-check-not="remark:"
 
diff --git a/clang/test/CodeGen/allow-ubsan-check.c b/clang/test/CodeGen/allow-ubsan-check.c
index b88c1f9cb220d7..0cd81a77f5cc59 100644
--- a/clang/test/CodeGen/allow-ubsan-check.c
+++ b/clang/test/CodeGen/allow-ubsan-check.c
@@ -1,4 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+//
+// We can't use -fsanitize-skip-hot-cutoff because that includes both -ubsan-guard-checks and
+//-lower-allow-check-percentile-cutoff.
 // RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -O1 -o - %s -fsanitize=signed-integer-overflow,integer-divide-by-zero,null,local-bounds -mllvm -ubsan-guard-checks | FileCheck %s
 // RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -O1 -o - %s -fsanitize=signed-integer-overflow,integer-divide-by-zero,null,local-bounds -mllvm -ubsan-guard-checks -fsanitize-trap=signed-integer-overflow,integer-divide-by-zero,null,local-bounds | FileCheck %s --check-prefixes=TR
 // RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -O1 -o - %s -fsanitize=signed-integer-overflow,integer-divide-by-zero,null,local-bounds -mllvm -ubsan-guard-checks -fsanitize-recover=signed-integer-overflow,integer-divide-by-zero,null,local-bounds | FileCheck %s --check-prefixes=REC
@@ -7,18 +10,26 @@
 // CHECK-LABEL: define dso_local noundef i32 @div(
 // CHECK-SAME: i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) 
local_unnamed_addr #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i32 [[Y]], 0, !nosanitize 
[[META2:![0-9]+]]
-// CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X]], -2147483648, !nosanitize 
[[META2]]
-// CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[Y]], -1, !nosanitize [[META2]]
-// CHECK-NEXT:    [[OR_NOT5:%.*]] = and i1 [[TMP1]], [[TMP2]]
-// CHECK-NEXT:    [[DOTNOT3:%.*]] = or i1 [[TMP0]], [[OR_NOT5]]
-// CHECK-NEXT:    [[TMP3:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 3), 
!nosanitize [[META2]]
-// CHECK-NEXT:    [[DOTNOT1:%.*]] = and i1 [[DOTNOT3]], [[TMP3]]
-// CHECK-NEXT:    br i1 [[DOTNOT1]], label %[[HANDLER_DIVREM_OVERFLOW:.*]], 
label %[[CONT:.*]], !prof [[PROF3:![0-9]+]], !nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP0:%.*]] = icmp ne i32 [[Y]], 0, !nosanitize 
[[META2:![0-9]+]]
+// CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X]], -2147483648, !nosanitize 
[[META2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[Y]], -1, !nosanitize [[META2]]
+// CHECK-NEXT:    [[OR:%.*]] = or i1 [[TMP1]], [[TMP2]], !nosanitize [[META2]]
+//
+//                                                                       27 == SO_IntegerDivideByZero
+// CHECK-NEXT:    [[TMP3:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 27), 
!nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true, !nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP0]], [[TMP4]], !nosanitize 
[[META2]]
+//
+//                                                                       41 == SO_SignedIntegerOverflow
+// CHECK-NEXT:    [[TMP6:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 41), 
!nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i1 [[TMP6]], true, !nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP8:%.*]] = or i1 [[OR]], [[TMP7]], !nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP9:%.*]] = and i1 [[TMP5]], [[TMP8]], !nosanitize 
[[META2]]
+// CHECK-NEXT:    br i1 [[TMP9]], label %[[CONT:.*]], label 
%[[HANDLER_DIVREM_OVERFLOW:.*]], !prof [[PROF3:![0-9]+]], !nosanitize [[META2]]
 // CHECK:       [[HANDLER_DIVREM_OVERFLOW]]:
-// CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[X]] to i64, !nosanitize [[META2]]
-// CHECK-NEXT:    [[TMP5:%.*]] = zext i32 [[Y]] to i64, !nosanitize [[META2]]
-// CHECK-NEXT:    tail call void @__ubsan_handle_divrem_overflow_abort(ptr 
nonnull @[[GLOB1:[0-9]+]], i64 [[TMP4]], i64 [[TMP5]]) #[[ATTR6:[0-9]+]], 
!nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP10:%.*]] = zext i32 [[X]] to i64, !nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[Y]] to i64, !nosanitize [[META2]]
+// CHECK-NEXT:    tail call void @__ubsan_handle_divrem_overflow_abort(ptr 
nonnull @[[GLOB1:[0-9]+]], i64 [[TMP10]], i64 [[TMP11]]) #[[ATTR6:[0-9]+]], 
!nosanitize [[META2]]
 // CHECK-NEXT:    unreachable, !nosanitize [[META2]]
 // CHECK:       [[CONT]]:
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv i32 [[X]], [[Y]]
@@ -27,14 +38,18 @@
 // TR-LABEL: define dso_local noundef i32 @div(
 // TR-SAME: i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) local_unnamed_addr 
#[[ATTR0:[0-9]+]] {
 // TR-NEXT:  [[ENTRY:.*:]]
-// TR-NEXT:    [[TMP0:%.*]] = icmp eq i32 [[Y]], 0, !nosanitize 
[[META2:![0-9]+]]
-// TR-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X]], -2147483648, !nosanitize 
[[META2]]
-// TR-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[Y]], -1, !nosanitize [[META2]]
-// TR-NEXT:    [[OR_NOT5:%.*]] = and i1 [[TMP1]], [[TMP2]]
-// TR-NEXT:    [[DOTNOT3:%.*]] = or i1 [[TMP0]], [[OR_NOT5]]
-// TR-NEXT:    [[TMP3:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 3), 
!nosanitize [[META2]]
-// TR-NEXT:    [[DOTNOT1:%.*]] = and i1 [[DOTNOT3]], [[TMP3]]
-// TR-NEXT:    br i1 [[DOTNOT1]], label %[[TRAP:.*]], label %[[CONT:.*]], 
!nosanitize [[META2]]
+// TR-NEXT:    [[TMP0:%.*]] = icmp ne i32 [[Y]], 0, !nosanitize 
[[META2:![0-9]+]]
+// TR-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X]], -2147483648, !nosanitize 
[[META2]]
+// TR-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[Y]], -1, !nosanitize [[META2]]
+// TR-NEXT:    [[OR:%.*]] = or i1 [[TMP1]], [[TMP2]], !nosanitize [[META2]]
+// TR-NEXT:    [[TMP3:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 27), 
!nosanitize [[META2]]
+// TR-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true, !nosanitize [[META2]]
+// TR-NEXT:    [[TMP5:%.*]] = or i1 [[TMP0]], [[TMP4]], !nosanitize [[META2]]
+// TR-NEXT:    [[TMP6:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 41), 
!nosanitize [[META2]]
+// TR-NEXT:    [[TMP7:%.*]] = xor i1 [[TMP6]], true, !nosanitize [[META2]]
+// TR-NEXT:    [[TMP8:%.*]] = or i1 [[OR]], [[TMP7]], !nosanitize [[META2]]
+// TR-NEXT:    [[TMP9:%.*]] = and i1 [[TMP5]], [[TMP8]], !nosanitize [[META2]]
+// TR-NEXT:    br i1 [[TMP9]], label %[[CONT:.*]], label %[[TRAP:.*]], 
!nosanitize [[META2]]
 // TR:       [[TRAP]]:
 // TR-NEXT:    tail call void @llvm.ubsantrap(i8 3) #[[ATTR5:[0-9]+]], 
!nosanitize [[META2]]
 // TR-NEXT:    unreachable, !nosanitize [[META2]]
@@ -45,18 +60,22 @@
 // REC-LABEL: define dso_local noundef i32 @div(
 // REC-SAME: i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) local_unnamed_addr 
#[[ATTR0:[0-9]+]] {
 // REC-NEXT:  [[ENTRY:.*:]]
-// REC-NEXT:    [[TMP0:%.*]] = icmp eq i32 [[Y]], 0, !nosanitize 
[[META2:![0-9]+]]
-// REC-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X]], -2147483648, !nosanitize 
[[META2]]
-// REC-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[Y]], -1, !nosanitize [[META2]]
-// REC-NEXT:    [[OR_NOT5:%.*]] = and i1 [[TMP1]], [[TMP2]]
-// REC-NEXT:    [[DOTNOT3:%.*]] = or i1 [[TMP0]], [[OR_NOT5]]
-// REC-NEXT:    [[TMP3:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 3), 
!nosanitize [[META2]]
-// REC-NEXT:    [[DOTNOT1:%.*]] = and i1 [[DOTNOT3]], [[TMP3]]
-// REC-NEXT:    br i1 [[DOTNOT1]], label %[[HANDLER_DIVREM_OVERFLOW:.*]], 
label %[[CONT:.*]], !prof [[PROF3:![0-9]+]], !nosanitize [[META2]]
+// REC-NEXT:    [[TMP0:%.*]] = icmp ne i32 [[Y]], 0, !nosanitize 
[[META2:![0-9]+]]
+// REC-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X]], -2147483648, !nosanitize 
[[META2]]
+// REC-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[Y]], -1, !nosanitize [[META2]]
+// REC-NEXT:    [[OR:%.*]] = or i1 [[TMP1]], [[TMP2]], !nosanitize [[META2]]
+// REC-NEXT:    [[TMP3:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 27), 
!nosanitize [[META2]]
+// REC-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true, !nosanitize [[META2]]
+// REC-NEXT:    [[TMP5:%.*]] = or i1 [[TMP0]], [[TMP4]], !nosanitize [[META2]]
+// REC-NEXT:    [[TMP6:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 41), 
!nosanitize [[META2]]
+// REC-NEXT:    [[TMP7:%.*]] = xor i1 [[TMP6]], true, !nosanitize [[META2]]
+// REC-NEXT:    [[TMP8:%.*]] = or i1 [[OR]], [[TMP7]], !nosanitize [[META2]]
+// REC-NEXT:    [[TMP9:%.*]] = and i1 [[TMP5]], [[TMP8]], !nosanitize [[META2]]
+// REC-NEXT:    br i1 [[TMP9]], label %[[CONT:.*]], label 
%[[HANDLER_DIVREM_OVERFLOW:.*]], !prof [[PROF3:![0-9]+]], !nosanitize [[META2]]
 // REC:       [[HANDLER_DIVREM_OVERFLOW]]:
-// REC-NEXT:    [[TMP4:%.*]] = zext i32 [[X]] to i64, !nosanitize [[META2]]
-// REC-NEXT:    [[TMP5:%.*]] = zext i32 [[Y]] to i64, !nosanitize [[META2]]
-// REC-NEXT:    tail call void @__ubsan_handle_divrem_overflow(ptr nonnull 
@[[GLOB1:[0-9]+]], i64 [[TMP4]], i64 [[TMP5]]) #[[ATTR6:[0-9]+]], !nosanitize 
[[META2]]
+// REC-NEXT:    [[TMP10:%.*]] = zext i32 [[X]] to i64, !nosanitize [[META2]]
+// REC-NEXT:    [[TMP11:%.*]] = zext i32 [[Y]] to i64, !nosanitize [[META2]]
+// REC-NEXT:    tail call void @__ubsan_handle_divrem_overflow(ptr nonnull 
@[[GLOB1:[0-9]+]], i64 [[TMP10]], i64 [[TMP11]]) #[[ATTR6:[0-9]+]], !nosanitize 
[[META2]]
 // REC-NEXT:    br label %[[CONT]], !nosanitize [[META2]]
 // REC:       [[CONT]]:
 // REC-NEXT:    [[DIV:%.*]] = sdiv i32 [[X]], [[Y]]
@@ -70,21 +89,23 @@ int div(int x, int y) {
 // CHECK-SAME: ptr noundef readonly [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[TMP0:%.*]] = icmp eq ptr [[X]], null, !nosanitize [[META2]]
-// CHECK-NEXT:    [[TMP1:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 22), 
!nosanitize [[META2]]
+//
+//                                                                       29 == SO_Null
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 29), 
!nosanitize [[META2]]
 // CHECK-NEXT:    [[DOTNOT1:%.*]] = and i1 [[TMP0]], [[TMP1]]
-// CHECK-NEXT:    br i1 [[DOTNOT1]], label %[[HANDLER_TYPE_MISMATCH:.*]], 
label %[[CONT:.*]], !prof [[PROF3]], !nosanitize [[META2]]
+// CHECK-NEXT:    br i1 [[DOTNOT1]], label %[[HANDLER_TYPE_MISMATCH:.*]], 
label %[[CONT:.*]], !prof [[PROF4:![0-9]+]], !nosanitize [[META2]]
 // CHECK:       [[HANDLER_TYPE_MISMATCH]]:
 // CHECK-NEXT:    tail call void @__ubsan_handle_type_mismatch_v1_abort(ptr 
nonnull @[[GLOB2:[0-9]+]], i64 0) #[[ATTR6]], !nosanitize [[META2]]
 // CHECK-NEXT:    unreachable, !nosanitize [[META2]]
 // CHECK:       [[CONT]]:
-// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X]], align 4, !tbaa 
[[TBAA4:![0-9]+]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X]], align 4, !tbaa 
[[TBAA5:![0-9]+]]
 // CHECK-NEXT:    ret i32 [[TMP2]]
 //
 // TR-LABEL: define dso_local i32 @null(
 // TR-SAME: ptr noundef readonly [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // TR-NEXT:  [[ENTRY:.*:]]
 // TR-NEXT:    [[TMP0:%.*]] = icmp eq ptr [[X]], null, !nosanitize [[META2]]
-// TR-NEXT:    [[TMP1:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 22), 
!nosanitize [[META2]]
+// TR-NEXT:    [[TMP1:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 29), 
!nosanitize [[META2]]
 // TR-NEXT:    [[DOTNOT1:%.*]] = and i1 [[TMP0]], [[TMP1]]
 // TR-NEXT:    br i1 [[DOTNOT1]], label %[[TRAP:.*]], label %[[CONT:.*]], 
!nosanitize [[META2]]
 // TR:       [[TRAP]]:
@@ -98,14 +119,14 @@ int div(int x, int y) {
 // REC-SAME: ptr noundef readonly [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // REC-NEXT:  [[ENTRY:.*:]]
 // REC-NEXT:    [[TMP0:%.*]] = icmp eq ptr [[X]], null, !nosanitize [[META2]]
-// REC-NEXT:    [[TMP1:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 22), 
!nosanitize [[META2]]
+// REC-NEXT:    [[TMP1:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 29), 
!nosanitize [[META2]]
 // REC-NEXT:    [[DOTNOT1:%.*]] = and i1 [[TMP0]], [[TMP1]]
-// REC-NEXT:    br i1 [[DOTNOT1]], label %[[HANDLER_TYPE_MISMATCH:.*]], label 
%[[CONT:.*]], !prof [[PROF3]], !nosanitize [[META2]]
+// REC-NEXT:    br i1 [[DOTNOT1]], label %[[HANDLER_TYPE_MISMATCH:.*]], label 
%[[CONT:.*]], !prof [[PROF4:![0-9]+]], !nosanitize [[META2]]
 // REC:       [[HANDLER_TYPE_MISMATCH]]:
 // REC-NEXT:    tail call void @__ubsan_handle_type_mismatch_v1(ptr nonnull 
@[[GLOB2:[0-9]+]], i64 0) #[[ATTR6]], !nosanitize [[META2]]
 // REC-NEXT:    br label %[[CONT]], !nosanitize [[META2]]
 // REC:       [[CONT]]:
-// REC-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X]], align 4, !tbaa 
[[TBAA4:![0-9]+]]
+// REC-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X]], align 4, !tbaa 
[[TBAA5:![0-9]+]]
 // REC-NEXT:    ret i32 [[TMP2]]
 //
 int null(int* x) {
@@ -117,9 +138,11 @@ int null(int* x) {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call { i32, i1 } 
@llvm.sadd.with.overflow.i32(i32 [[X]], i32 [[Y]]), !nosanitize [[META2]]
 // CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { i32, i1 } [[TMP0]], 1, 
!nosanitize [[META2]]
-// CHECK-NEXT:    [[TMP2:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 0), 
!nosanitize [[META2]]
+//
+//                                                                       41 == SO_SignedIntegerOverflow
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 41), 
!nosanitize [[META2]]
 // CHECK-NEXT:    [[DOTDEMORGAN:%.*]] = and i1 [[TMP1]], [[TMP2]]
-// CHECK-NEXT:    br i1 [[DOTDEMORGAN]], label %[[HANDLER_ADD_OVERFLOW:.*]], 
label %[[CONT:.*]], !prof [[PROF3]], !nosanitize [[META2]]
+// CHECK-NEXT:    br i1 [[DOTDEMORGAN]], label %[[HANDLER_ADD_OVERFLOW:.*]], 
label %[[CONT:.*]], !prof [[PROF4]], !nosanitize [[META2]]
 // CHECK:       [[HANDLER_ADD_OVERFLOW]]:
 // CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[X]] to i64, !nosanitize [[META2]]
 // CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[Y]] to i64, !nosanitize [[META2]]
@@ -134,7 +157,7 @@ int null(int* x) {
 // TR-NEXT:  [[ENTRY:.*:]]
 // TR-NEXT:    [[TMP0:%.*]] = tail call { i32, i1 } 
@llvm.sadd.with.overflow.i32(i32 [[X]], i32 [[Y]]), !nosanitize [[META2]]
 // TR-NEXT:    [[TMP1:%.*]] = extractvalue { i32, i1 } [[TMP0]], 1, 
!nosanitize [[META2]]
-// TR-NEXT:    [[TMP2:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 0), 
!nosanitize [[META2]]
+// TR-NEXT:    [[TMP2:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 41), 
!nosanitize [[META2]]
 // TR-NEXT:    [[DOTDEMORGAN:%.*]] = and i1 [[TMP1]], [[TMP2]]
 // TR-NEXT:    br i1 [[DOTDEMORGAN]], label %[[TRAP:.*]], label %[[CONT:.*]], 
!nosanitize [[META2]]
 // TR:       [[TRAP]]:
@@ -149,9 +172,9 @@ int null(int* x) {
 // REC-NEXT:  [[ENTRY:.*:]]
 // REC-NEXT:    [[TMP0:%.*]] = tail call { i32, i1 } 
@llvm.sadd.with.overflow.i32(i32 [[X]], i32 [[Y]]), !nosanitize [[META2]]
 // REC-NEXT:    [[TMP1:%.*]] = extractvalue { i32, i1 } [[TMP0]], 1, 
!nosanitize [[META2]]
-// REC-NEXT:    [[TMP2:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 0), 
!nosanitize [[META2]]
+// REC-NEXT:    [[TMP2:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 41), 
!nosanitize [[META2]]
 // REC-NEXT:    [[DOTDEMORGAN:%.*]] = and i1 [[TMP1]], [[TMP2]]
-// REC-NEXT:    br i1 [[DOTDEMORGAN]], label %[[HANDLER_ADD_OVERFLOW:.*]], 
label %[[CONT:.*]], !prof [[PROF3]], !nosanitize [[META2]]
+// REC-NEXT:    br i1 [[DOTDEMORGAN]], label %[[HANDLER_ADD_OVERFLOW:.*]], 
label %[[CONT:.*]], !prof [[PROF4]], !nosanitize [[META2]]
 // REC:       [[HANDLER_ADD_OVERFLOW]]:
 // REC-NEXT:    [[TMP3:%.*]] = zext i32 [[X]] to i64, !nosanitize [[META2]]
 // REC-NEXT:    [[TMP4:%.*]] = zext i32 [[Y]] to i64, !nosanitize [[META2]]
@@ -175,12 +198,14 @@ void use(double*);
 // CHECK-NEXT:    call void @use(ptr noundef nonnull [[VLA]]) #[[ATTR7:[0-9]+]]
 // CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[I]] to i64
 // CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i64 [[TMP0]], [[IDXPROM]]
+//
+//                                                                  71 == SO_LocalBounds
 // CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.allow.ubsan.check(i8 71), 
!nosanitize [[META2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]], !nosanitize 
[[META2]]
 // CHECK-NEXT:    br i1 [[TMP3]], label %[[TRAP:.*]], label %[[BB4:.*]]
 // CHECK:       [[BB4]]:
 // CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr 
[[VLA]], i64 [[IDXPROM]]
-// CHECK-NEXT:    [[TMP5:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa 
[[TBAA8:![0-9]+]]
+// CHECK-NEXT:    [[TMP5:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa 
[[TBAA9:![0-9]+]]
 // CHECK-NEXT:    ret double [[TMP5]]
 // CHECK:       [[TRAP]]:
 // CHECK-NEXT:    call void @__ubsan_handle_local_out_of_bounds_abort() 
#[[ATTR6]], !nosanitize [[META2]]
@@ -218,7 +243,7 @@ void use(double*);
 // REC-NEXT:    br i1 [[TMP3]], label %[[TRAP:.*]], label %[[BB4:.*]]
 // REC:       [[BB4]]:
 // REC-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[VLA]], 
i64 [[IDXPROM]]
-// REC-NEXT:    [[TMP5:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa 
[[TBAA8:![0-9]+]]
+// REC-NEXT:    [[TMP5:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa 
[[TBAA9:![0-9]+]]
 // REC-NEXT:    ret double [[TMP5]]
 // REC:       [[TRAP]]:
 // REC-NEXT:    call void @__ubsan_handle_local_out_of_bounds() #[[ATTR6]], 
!nosanitize [[META2]]
@@ -232,13 +257,14 @@ double lbounds(int b, int i) {
 
 //.
 // CHECK: [[META2]] = !{}
-// CHECK: [[PROF3]] = !{!"branch_weights", i32 1, i32 1048575}
-// CHECK: [[TBAA4]] = !{[[META5:![0-9]+]], [[META5]], i64 0}
-// CHECK: [[META5]] = !{!"int", [[META6:![0-9]+]], i64 0}
-// CHECK: [[META6]] = !{!"omnipotent char", [[META7:![0-9]+]], i64 0}
-// CHECK: [[META7]] = !{!"Simple C/C++ TBAA"}
-// CHECK: [[TBAA8]] = !{[[META9:![0-9]+]], [[META9]], i64 0}
-// CHECK: [[META9]] = !{!"double", [[META6]], i64 0}
+// CHECK: [[PROF3]] = !{!"branch_weights", i32 1048575, i32 1}
+// CHECK: [[PROF4]] = !{!"branch_weights", i32 1, i32 1048575}
+// CHECK: [[TBAA5]] = !{[[META6:![0-9]+]], [[META6]], i64 0}
+// CHECK: [[META6]] = !{!"int", [[META7:![0-9]+]], i64 0}
+// CHECK: [[META7]] = !{!"omnipotent char", [[META8:![0-9]+]], i64 0}
+// CHECK: [[META8]] = !{!"Simple C/C++ TBAA"}
+// CHECK: [[TBAA9]] = !{[[META10:![0-9]+]], [[META10]], i64 0}
+// CHECK: [[META10]] = !{!"double", [[META7]], i64 0}
 //.
 // TR: [[META2]] = !{}
 // TR: [[TBAA3]] = !{[[META4:![0-9]+]], [[META4]], i64 0}
@@ -249,11 +275,12 @@ double lbounds(int b, int i) {
 // TR: [[META8]] = !{!"double", [[META5]], i64 0}
 //.
 // REC: [[META2]] = !{}
-// REC: [[PROF3]] = !{!"branch_weights", i32 1, i32 1048575}
-// REC: [[TBAA4]] = !{[[META5:![0-9]+]], [[META5]], i64 0}
-// REC: [[META5]] = !{!"int", [[META6:![0-9]+]], i64 0}
-// REC: [[META6]] = !{!"omnipotent char", [[META7:![0-9]+]], i64 0}
-// REC: [[META7]] = !{!"Simple C/C++ TBAA"}
-// REC: [[TBAA8]] = !{[[META9:![0-9]+]], [[META9]], i64 0}
-// REC: [[META9]] = !{!"double", [[META6]], i64 0}
+// REC: [[PROF3]] = !{!"branch_weights", i32 1048575, i32 1}
+// REC: [[PROF4]] = !{!"branch_weights", i32 1, i32 1048575}
+// REC: [[TBAA5]] = !{[[META6:![0-9]+]], [[META6]], i64 0}
+// REC: [[META6]] = !{!"int", [[META7:![0-9]+]], i64 0}
+// REC: [[META7]] = !{!"omnipotent char", [[META8:![0-9]+]], i64 0}
+// REC: [[META8]] = !{!"Simple C/C++ TBAA"}
+// REC: [[TBAA9]] = !{[[META10:![0-9]+]], [[META10]], i64 0}
+// REC: [[META10]] = !{!"double", [[META7]], i64 0}
 //.

>From e5a73c1e0d7fb4661b3111f247a2cfb433cd18e8 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurs...@google.com>
Date: Thu, 30 Jan 2025 07:07:22 +0000
Subject: [PATCH 2/2] Avoid use-after-scope by changing lambda to capture by
 value

---
 clang/lib/CodeGen/BackendUtil.cpp | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index bc32878d9669ec..97e9bbccd61ef1 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -802,15 +802,16 @@ static void addSanitizers(const Triple &TargetTriple,
   // TODO: remove IsRequested()
   if (LowerAllowCheckPass::IsRequested() || ScaledCutoffs.has_value()) {
     // We want to call it after inline, which is about OptimizerEarlyEPCallback.
-    PB.registerOptimizerEarlyEPCallback([&](ModulePassManager &MPM,
-                                            OptimizationLevel Level,
-                                            ThinOrFullLTOPhase Phase) {
-      LowerAllowCheckPass::Options Opts;
-      // TODO: after removing IsRequested(), make this unconditional
-      if (ScaledCutoffs.has_value())
-        Opts.cutoffs = ScaledCutoffs.value();
-      MPM.addPass(createModuleToFunctionPassAdaptor(LowerAllowCheckPass(Opts)));
-    });
+    PB.registerOptimizerEarlyEPCallback(
+        [ScaledCutoffs](ModulePassManager &MPM, OptimizationLevel Level,
+                        ThinOrFullLTOPhase Phase) {
+          LowerAllowCheckPass::Options Opts;
+          // TODO: after removing IsRequested(), make this unconditional
+          if (ScaledCutoffs.has_value())
+            Opts.cutoffs = ScaledCutoffs.value();
+          MPM.addPass(
+              createModuleToFunctionPassAdaptor(LowerAllowCheckPass(Opts)));
+        });
   }
 }
 
