https://github.com/thurstond updated https://github.com/llvm/llvm-project/pull/124857

>From 68246540d28c765af1fe4d61244e35cc8ff723cc Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurs...@google.com>
Date: Wed, 22 Jan 2025 18:59:07 +0000
Subject: [PATCH 1/5] [ubsan] Connect -fsanitize-skip-hot-cutoff to
 LowerAllowCheckPass<cutoffs>

This adds the plumbing between -fsanitize-skip-hot-cutoff (introduced
in https://github.com/llvm/llvm-project/pull/121619) and 
LowerAllowCheckPass<cutoffs> (introduced in 
https://github.com/llvm/llvm-project/pull/124211).

The net effect is that -fsanitize-skip-hot-cutoff now combines the 
functionality of -ubsan-guard-checks and -lower-allow-check-percentile-cutoff 
(though this patch does not remove those yet), and generalizes the latter to 
allow per-sanitizer cutoffs.

Note: this patch replaces Intrinsic::allow_ubsan_check's SanitizerHandler 
parameter
with SanitizerOrdinal; this is necessary because the hot cutoffs are
specified in terms of SanitizerOrdinal (e.g., null, alignment), not
SanitizerHandler (e.g., TypeMismatch).
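
To make the scaling concrete, here is a minimal standalone sketch (illustrative
only, not code from this patch; the helper name and example values are invented,
and the exact rounding/clamping in BackendUtil.cpp differs slightly across the
revisions below): -fsanitize-skip-hot-cutoff supplies per-sanitizer cutoffs as
doubles in [0, 1], and they are scaled to the per-million integer range that
LowerAllowCheckPass::Options::cutoffs consumes, with a missing cutoff meaning
"never skip the check".

  // scale_cutoffs_sketch.cpp -- illustrative only, not part of this patch.
  #include <cmath>
  #include <optional>
  #include <vector>

  // Scale optional per-sanitizer cutoffs in [0, 1] to per-million integers.
  std::vector<int>
  scaleCutoffs(const std::vector<std::optional<double>> &Cutoffs,
               int ScalingFactor = 1000000) {
    std::vector<int> Scaled;
    Scaled.reserve(Cutoffs.size());
    for (const std::optional<double> &C : Cutoffs)
      Scaled.push_back(static_cast<int>(std::lround(C.value_or(0.0) * ScalingFactor)));
    return Scaled;
  }

  int main() {
    // 0.000001 (the value used in the updated tests) scales to 1;
    // 0.5 scales to 500000; an unset cutoff scales to 0.
    std::vector<std::optional<double>> Cutoffs = {std::nullopt, 0.000001, 0.5};
    std::vector<int> Scaled = scaleCutoffs(Cutoffs); // {0, 1, 500000}
    return Scaled[0];
  }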
---
 clang/lib/CodeGen/BackendUtil.cpp             |  27 +++-
 clang/lib/CodeGen/CGExpr.cpp                  |  30 ++--
 clang/lib/Frontend/CompilerInvocation.cpp     |   1 -
 clang/test/CodeGen/allow-ubsan-check-inline.c |   5 +
 clang/test/CodeGen/allow-ubsan-check.c        | 143 +++++++++++-------
 5 files changed, 134 insertions(+), 72 deletions(-)

diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 3e65eeb3755d2f..104972e213adae 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -795,12 +795,37 @@ static void addSanitizers(const Triple &TargetTriple,
     PB.registerOptimizerLastEPCallback(SanitizersCallback);
   }
 
-  if (LowerAllowCheckPass::IsRequested()) {
+  bool lowerAllowCheck = LowerAllowCheckPass::IsRequested();
+  // Is there a non-zero cutoff?
+  static const double SanitizerMaskCutoffsEps = 0.000000001f;
+  for (unsigned int i = 0; i < SanitizerKind::SO_Count; ++i) {
+    std::optional<double> maybeCutoff = CodeGenOpts.SanitizeSkipHotCutoffs[i];
+    lowerAllowCheck |= (maybeCutoff.has_value() &&
+                        (maybeCutoff.value() > SanitizerMaskCutoffsEps));
+  }
+
+  if (lowerAllowCheck) {
     // We want to call it after inline, which is about OptimizerEarlyEPCallback.
     PB.registerOptimizerEarlyEPCallback([&](ModulePassManager &MPM,
                                             OptimizationLevel Level,
                                             ThinOrFullLTOPhase Phase) {
       LowerAllowCheckPass::Options Opts;
+
+      // SanitizeSkipHotCutoffs stores doubles with range [0, 1]
+      // Opts.cutoffs wants ints with range [0, 999999]
+      for (unsigned int i = 0; i < SanitizerKind::SO_Count; ++i) {
+        std::optional<double> maybeCutoff =
+            CodeGenOpts.SanitizeSkipHotCutoffs[i];
+        if (maybeCutoff.has_value() &&
+            (maybeCutoff.value() > SanitizerMaskCutoffsEps)) {
+          Opts.cutoffs.push_back(
+              std::clamp((int)(maybeCutoff.value() * 1000000), 0, 999999));
+        } else {
+          // Default is don't skip the check
+          Opts.cutoffs.push_back(0);
+        }
+      }
+
       MPM.addPass(createModuleToFunctionPassAdaptor(LowerAllowCheckPass(Opts)));
     });
   }
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 9676e61cf322d9..0a9cae8c2f02ae 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -3614,29 +3614,35 @@ void CodeGenFunction::EmitCheck(
   llvm::Value *RecoverableCond = nullptr;
   llvm::Value *TrapCond = nullptr;
   bool NoMerge = false;
+  // Expand checks into:
+  //   (Check1 || !allow_ubsan_check) && (Check2 || !allow_ubsan_check) ...
+  // We need separate allow_ubsan_check intrinsics because they have separately
+  // specified cutoffs.
+  // This expression looks expensive but will be simplified after
+  // LowerAllowCheckPass.
+  static const double SanitizerMaskCutoffsEps = 0.000000001f;
   for (auto &[Check, Ord] : Checked) {
+    llvm::Value *GuardedCheck = Check;
+    if (ClSanitizeGuardChecks ||
+        (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] >
+         SanitizerMaskCutoffsEps)) {
+      llvm::Value *Allow = Builder.CreateCall(
+          CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
+          llvm::ConstantInt::get(CGM.Int8Ty, Ord));
+      GuardedCheck = Builder.CreateOr(Check, Builder.CreateNot(Allow));
+    }
+
     // -fsanitize-trap= overrides -fsanitize-recover=.
     llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
                          : CGM.getCodeGenOpts().SanitizeRecover.has(Ord)
                              ? RecoverableCond
                              : FatalCond;
-    Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
+    Cond = Cond ? Builder.CreateAnd(Cond, GuardedCheck) : GuardedCheck;
 
     if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Ord))
       NoMerge = true;
   }
 
-  if (ClSanitizeGuardChecks) {
-    llvm::Value *Allow =
-        Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
-                           llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));
-
-    for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
-      if (*Cond)
-        *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
-    }
-  }
-
   if (TrapCond)
     EmitTrapCheck(TrapCond, CheckHandler, NoMerge);
   if (!FatalCond && !RecoverableCond)
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index 44dd69972f8e5d..335ddfbe767f1c 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -2314,7 +2314,6 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
   Opts.SanitizeSkipHotCutoffs = parseSanitizerWeightedKinds(
       "-fsanitize-skip-hot-cutoff=",
       Args.getAllArgValues(OPT_fsanitize_skip_hot_cutoff_EQ), Diags);
-
   Opts.EmitVersionIdentMetadata = Args.hasFlag(OPT_Qy, OPT_Qn, true);
 
   if (!LangOpts->CUDAIsDevice)
diff --git a/clang/test/CodeGen/allow-ubsan-check-inline.c b/clang/test/CodeGen/allow-ubsan-check-inline.c
index 1de24ab90dac0e..eed48cf15ecca4 100644
--- a/clang/test/CodeGen/allow-ubsan-check-inline.c
+++ b/clang/test/CodeGen/allow-ubsan-check-inline.c
@@ -1,3 +1,8 @@
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s -fsanitize=signed-integer-overflow -fsanitize-skip-hot-cutoff=signed-integer-overflow=0.000001 -O3 -mllvm -lower-allow-check-random-rate=1 -Rpass=lower-allow-check -Rpass-missed=lower-allow-check -fno-inline 2>&1 | FileCheck %s --check-prefixes=NOINL --implicit-check-not="remark:"
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s -fsanitize=signed-integer-overflow -fsanitize-skip-hot-cutoff=signed-integer-overflow=0.000001 -O3 -mllvm -lower-allow-check-random-rate=1 -Rpass=lower-allow-check -Rpass-missed=lower-allow-check 2>&1 | FileCheck %s --check-prefixes=INLINE --implicit-check-not="remark:"
+//
+// -ubsan-guard-checks is deprecated and will be removed in the future;
+// use -fsanitize-skip-hot-cutoff, as shown above.
 // RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s -fsanitize=signed-integer-overflow -mllvm -ubsan-guard-checks -O3 -mllvm -lower-allow-check-random-rate=1 -Rpass=lower-allow-check -Rpass-missed=lower-allow-check -fno-inline 2>&1 | FileCheck %s --check-prefixes=NOINL --implicit-check-not="remark:"
 // RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s -fsanitize=signed-integer-overflow -mllvm -ubsan-guard-checks -O3 -mllvm -lower-allow-check-random-rate=1 -Rpass=lower-allow-check -Rpass-missed=lower-allow-check 2>&1 | FileCheck %s --check-prefixes=INLINE --implicit-check-not="remark:"
 
diff --git a/clang/test/CodeGen/allow-ubsan-check.c b/clang/test/CodeGen/allow-ubsan-check.c
index b88c1f9cb220d7..0cd81a77f5cc59 100644
--- a/clang/test/CodeGen/allow-ubsan-check.c
+++ b/clang/test/CodeGen/allow-ubsan-check.c
@@ -1,4 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+//
+// We can't use -fsanitize-skip-hot-cutoff because that includes both -ubsan-guard-checks and
+//-lower-allow-check-percentile-cutoff.
 // RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -O1 -o - %s -fsanitize=signed-integer-overflow,integer-divide-by-zero,null,local-bounds -mllvm -ubsan-guard-checks | FileCheck %s
 // RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -O1 -o - %s -fsanitize=signed-integer-overflow,integer-divide-by-zero,null,local-bounds -mllvm -ubsan-guard-checks -fsanitize-trap=signed-integer-overflow,integer-divide-by-zero,null,local-bounds | FileCheck %s --check-prefixes=TR
 // RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -O1 -o - %s -fsanitize=signed-integer-overflow,integer-divide-by-zero,null,local-bounds -mllvm -ubsan-guard-checks -fsanitize-recover=signed-integer-overflow,integer-divide-by-zero,null,local-bounds | FileCheck %s --check-prefixes=REC
@@ -7,18 +10,26 @@
 // CHECK-LABEL: define dso_local noundef i32 @div(
 // CHECK-SAME: i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) 
local_unnamed_addr #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i32 [[Y]], 0, !nosanitize 
[[META2:![0-9]+]]
-// CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X]], -2147483648, !nosanitize 
[[META2]]
-// CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[Y]], -1, !nosanitize [[META2]]
-// CHECK-NEXT:    [[OR_NOT5:%.*]] = and i1 [[TMP1]], [[TMP2]]
-// CHECK-NEXT:    [[DOTNOT3:%.*]] = or i1 [[TMP0]], [[OR_NOT5]]
-// CHECK-NEXT:    [[TMP3:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 3), 
!nosanitize [[META2]]
-// CHECK-NEXT:    [[DOTNOT1:%.*]] = and i1 [[DOTNOT3]], [[TMP3]]
-// CHECK-NEXT:    br i1 [[DOTNOT1]], label %[[HANDLER_DIVREM_OVERFLOW:.*]], 
label %[[CONT:.*]], !prof [[PROF3:![0-9]+]], !nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP0:%.*]] = icmp ne i32 [[Y]], 0, !nosanitize 
[[META2:![0-9]+]]
+// CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X]], -2147483648, !nosanitize 
[[META2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[Y]], -1, !nosanitize [[META2]]
+// CHECK-NEXT:    [[OR:%.*]] = or i1 [[TMP1]], [[TMP2]], !nosanitize [[META2]]
+//
+//                                                                       27 == SO_IntegerDivideByZero
+// CHECK-NEXT:    [[TMP3:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 27), !nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true, !nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP0]], [[TMP4]], !nosanitize 
[[META2]]
+//
+//                                                                       41 == SO_SignedIntegerOverflow
+// CHECK-NEXT:    [[TMP6:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 41), !nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i1 [[TMP6]], true, !nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP8:%.*]] = or i1 [[OR]], [[TMP7]], !nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP9:%.*]] = and i1 [[TMP5]], [[TMP8]], !nosanitize 
[[META2]]
+// CHECK-NEXT:    br i1 [[TMP9]], label %[[CONT:.*]], label 
%[[HANDLER_DIVREM_OVERFLOW:.*]], !prof [[PROF3:![0-9]+]], !nosanitize [[META2]]
 // CHECK:       [[HANDLER_DIVREM_OVERFLOW]]:
-// CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[X]] to i64, !nosanitize [[META2]]
-// CHECK-NEXT:    [[TMP5:%.*]] = zext i32 [[Y]] to i64, !nosanitize [[META2]]
-// CHECK-NEXT:    tail call void @__ubsan_handle_divrem_overflow_abort(ptr 
nonnull @[[GLOB1:[0-9]+]], i64 [[TMP4]], i64 [[TMP5]]) #[[ATTR6:[0-9]+]], 
!nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP10:%.*]] = zext i32 [[X]] to i64, !nosanitize [[META2]]
+// CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[Y]] to i64, !nosanitize [[META2]]
+// CHECK-NEXT:    tail call void @__ubsan_handle_divrem_overflow_abort(ptr 
nonnull @[[GLOB1:[0-9]+]], i64 [[TMP10]], i64 [[TMP11]]) #[[ATTR6:[0-9]+]], 
!nosanitize [[META2]]
 // CHECK-NEXT:    unreachable, !nosanitize [[META2]]
 // CHECK:       [[CONT]]:
 // CHECK-NEXT:    [[DIV:%.*]] = sdiv i32 [[X]], [[Y]]
@@ -27,14 +38,18 @@
 // TR-LABEL: define dso_local noundef i32 @div(
 // TR-SAME: i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) local_unnamed_addr 
#[[ATTR0:[0-9]+]] {
 // TR-NEXT:  [[ENTRY:.*:]]
-// TR-NEXT:    [[TMP0:%.*]] = icmp eq i32 [[Y]], 0, !nosanitize 
[[META2:![0-9]+]]
-// TR-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X]], -2147483648, !nosanitize 
[[META2]]
-// TR-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[Y]], -1, !nosanitize [[META2]]
-// TR-NEXT:    [[OR_NOT5:%.*]] = and i1 [[TMP1]], [[TMP2]]
-// TR-NEXT:    [[DOTNOT3:%.*]] = or i1 [[TMP0]], [[OR_NOT5]]
-// TR-NEXT:    [[TMP3:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 3), 
!nosanitize [[META2]]
-// TR-NEXT:    [[DOTNOT1:%.*]] = and i1 [[DOTNOT3]], [[TMP3]]
-// TR-NEXT:    br i1 [[DOTNOT1]], label %[[TRAP:.*]], label %[[CONT:.*]], 
!nosanitize [[META2]]
+// TR-NEXT:    [[TMP0:%.*]] = icmp ne i32 [[Y]], 0, !nosanitize 
[[META2:![0-9]+]]
+// TR-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X]], -2147483648, !nosanitize 
[[META2]]
+// TR-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[Y]], -1, !nosanitize [[META2]]
+// TR-NEXT:    [[OR:%.*]] = or i1 [[TMP1]], [[TMP2]], !nosanitize [[META2]]
+// TR-NEXT:    [[TMP3:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 27), 
!nosanitize [[META2]]
+// TR-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true, !nosanitize [[META2]]
+// TR-NEXT:    [[TMP5:%.*]] = or i1 [[TMP0]], [[TMP4]], !nosanitize [[META2]]
+// TR-NEXT:    [[TMP6:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 41), 
!nosanitize [[META2]]
+// TR-NEXT:    [[TMP7:%.*]] = xor i1 [[TMP6]], true, !nosanitize [[META2]]
+// TR-NEXT:    [[TMP8:%.*]] = or i1 [[OR]], [[TMP7]], !nosanitize [[META2]]
+// TR-NEXT:    [[TMP9:%.*]] = and i1 [[TMP5]], [[TMP8]], !nosanitize [[META2]]
+// TR-NEXT:    br i1 [[TMP9]], label %[[CONT:.*]], label %[[TRAP:.*]], 
!nosanitize [[META2]]
 // TR:       [[TRAP]]:
 // TR-NEXT:    tail call void @llvm.ubsantrap(i8 3) #[[ATTR5:[0-9]+]], 
!nosanitize [[META2]]
 // TR-NEXT:    unreachable, !nosanitize [[META2]]
@@ -45,18 +60,22 @@
 // REC-LABEL: define dso_local noundef i32 @div(
 // REC-SAME: i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) local_unnamed_addr 
#[[ATTR0:[0-9]+]] {
 // REC-NEXT:  [[ENTRY:.*:]]
-// REC-NEXT:    [[TMP0:%.*]] = icmp eq i32 [[Y]], 0, !nosanitize 
[[META2:![0-9]+]]
-// REC-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X]], -2147483648, !nosanitize 
[[META2]]
-// REC-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[Y]], -1, !nosanitize [[META2]]
-// REC-NEXT:    [[OR_NOT5:%.*]] = and i1 [[TMP1]], [[TMP2]]
-// REC-NEXT:    [[DOTNOT3:%.*]] = or i1 [[TMP0]], [[OR_NOT5]]
-// REC-NEXT:    [[TMP3:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 3), 
!nosanitize [[META2]]
-// REC-NEXT:    [[DOTNOT1:%.*]] = and i1 [[DOTNOT3]], [[TMP3]]
-// REC-NEXT:    br i1 [[DOTNOT1]], label %[[HANDLER_DIVREM_OVERFLOW:.*]], 
label %[[CONT:.*]], !prof [[PROF3:![0-9]+]], !nosanitize [[META2]]
+// REC-NEXT:    [[TMP0:%.*]] = icmp ne i32 [[Y]], 0, !nosanitize 
[[META2:![0-9]+]]
+// REC-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X]], -2147483648, !nosanitize 
[[META2]]
+// REC-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[Y]], -1, !nosanitize [[META2]]
+// REC-NEXT:    [[OR:%.*]] = or i1 [[TMP1]], [[TMP2]], !nosanitize [[META2]]
+// REC-NEXT:    [[TMP3:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 27), 
!nosanitize [[META2]]
+// REC-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true, !nosanitize [[META2]]
+// REC-NEXT:    [[TMP5:%.*]] = or i1 [[TMP0]], [[TMP4]], !nosanitize [[META2]]
+// REC-NEXT:    [[TMP6:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 41), 
!nosanitize [[META2]]
+// REC-NEXT:    [[TMP7:%.*]] = xor i1 [[TMP6]], true, !nosanitize [[META2]]
+// REC-NEXT:    [[TMP8:%.*]] = or i1 [[OR]], [[TMP7]], !nosanitize [[META2]]
+// REC-NEXT:    [[TMP9:%.*]] = and i1 [[TMP5]], [[TMP8]], !nosanitize [[META2]]
+// REC-NEXT:    br i1 [[TMP9]], label %[[CONT:.*]], label 
%[[HANDLER_DIVREM_OVERFLOW:.*]], !prof [[PROF3:![0-9]+]], !nosanitize [[META2]]
 // REC:       [[HANDLER_DIVREM_OVERFLOW]]:
-// REC-NEXT:    [[TMP4:%.*]] = zext i32 [[X]] to i64, !nosanitize [[META2]]
-// REC-NEXT:    [[TMP5:%.*]] = zext i32 [[Y]] to i64, !nosanitize [[META2]]
-// REC-NEXT:    tail call void @__ubsan_handle_divrem_overflow(ptr nonnull 
@[[GLOB1:[0-9]+]], i64 [[TMP4]], i64 [[TMP5]]) #[[ATTR6:[0-9]+]], !nosanitize 
[[META2]]
+// REC-NEXT:    [[TMP10:%.*]] = zext i32 [[X]] to i64, !nosanitize [[META2]]
+// REC-NEXT:    [[TMP11:%.*]] = zext i32 [[Y]] to i64, !nosanitize [[META2]]
+// REC-NEXT:    tail call void @__ubsan_handle_divrem_overflow(ptr nonnull 
@[[GLOB1:[0-9]+]], i64 [[TMP10]], i64 [[TMP11]]) #[[ATTR6:[0-9]+]], !nosanitize 
[[META2]]
 // REC-NEXT:    br label %[[CONT]], !nosanitize [[META2]]
 // REC:       [[CONT]]:
 // REC-NEXT:    [[DIV:%.*]] = sdiv i32 [[X]], [[Y]]
@@ -70,21 +89,23 @@ int div(int x, int y) {
 // CHECK-SAME: ptr noundef readonly [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[TMP0:%.*]] = icmp eq ptr [[X]], null, !nosanitize [[META2]]
-// CHECK-NEXT:    [[TMP1:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 22), !nosanitize [[META2]]
+//
+//                                                                       29 == SO_Null
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 29), !nosanitize [[META2]]
 // CHECK-NEXT:    [[DOTNOT1:%.*]] = and i1 [[TMP0]], [[TMP1]]
-// CHECK-NEXT:    br i1 [[DOTNOT1]], label %[[HANDLER_TYPE_MISMATCH:.*]], 
label %[[CONT:.*]], !prof [[PROF3]], !nosanitize [[META2]]
+// CHECK-NEXT:    br i1 [[DOTNOT1]], label %[[HANDLER_TYPE_MISMATCH:.*]], 
label %[[CONT:.*]], !prof [[PROF4:![0-9]+]], !nosanitize [[META2]]
 // CHECK:       [[HANDLER_TYPE_MISMATCH]]:
 // CHECK-NEXT:    tail call void @__ubsan_handle_type_mismatch_v1_abort(ptr 
nonnull @[[GLOB2:[0-9]+]], i64 0) #[[ATTR6]], !nosanitize [[META2]]
 // CHECK-NEXT:    unreachable, !nosanitize [[META2]]
 // CHECK:       [[CONT]]:
-// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X]], align 4, !tbaa 
[[TBAA4:![0-9]+]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X]], align 4, !tbaa 
[[TBAA5:![0-9]+]]
 // CHECK-NEXT:    ret i32 [[TMP2]]
 //
 // TR-LABEL: define dso_local i32 @null(
 // TR-SAME: ptr noundef readonly [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // TR-NEXT:  [[ENTRY:.*:]]
 // TR-NEXT:    [[TMP0:%.*]] = icmp eq ptr [[X]], null, !nosanitize [[META2]]
-// TR-NEXT:    [[TMP1:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 22), 
!nosanitize [[META2]]
+// TR-NEXT:    [[TMP1:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 29), 
!nosanitize [[META2]]
 // TR-NEXT:    [[DOTNOT1:%.*]] = and i1 [[TMP0]], [[TMP1]]
 // TR-NEXT:    br i1 [[DOTNOT1]], label %[[TRAP:.*]], label %[[CONT:.*]], 
!nosanitize [[META2]]
 // TR:       [[TRAP]]:
@@ -98,14 +119,14 @@ int div(int x, int y) {
 // REC-SAME: ptr noundef readonly [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
 // REC-NEXT:  [[ENTRY:.*:]]
 // REC-NEXT:    [[TMP0:%.*]] = icmp eq ptr [[X]], null, !nosanitize [[META2]]
-// REC-NEXT:    [[TMP1:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 22), 
!nosanitize [[META2]]
+// REC-NEXT:    [[TMP1:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 29), 
!nosanitize [[META2]]
 // REC-NEXT:    [[DOTNOT1:%.*]] = and i1 [[TMP0]], [[TMP1]]
-// REC-NEXT:    br i1 [[DOTNOT1]], label %[[HANDLER_TYPE_MISMATCH:.*]], label 
%[[CONT:.*]], !prof [[PROF3]], !nosanitize [[META2]]
+// REC-NEXT:    br i1 [[DOTNOT1]], label %[[HANDLER_TYPE_MISMATCH:.*]], label 
%[[CONT:.*]], !prof [[PROF4:![0-9]+]], !nosanitize [[META2]]
 // REC:       [[HANDLER_TYPE_MISMATCH]]:
 // REC-NEXT:    tail call void @__ubsan_handle_type_mismatch_v1(ptr nonnull 
@[[GLOB2:[0-9]+]], i64 0) #[[ATTR6]], !nosanitize [[META2]]
 // REC-NEXT:    br label %[[CONT]], !nosanitize [[META2]]
 // REC:       [[CONT]]:
-// REC-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X]], align 4, !tbaa 
[[TBAA4:![0-9]+]]
+// REC-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X]], align 4, !tbaa 
[[TBAA5:![0-9]+]]
 // REC-NEXT:    ret i32 [[TMP2]]
 //
 int null(int* x) {
@@ -117,9 +138,11 @@ int null(int* x) {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call { i32, i1 } 
@llvm.sadd.with.overflow.i32(i32 [[X]], i32 [[Y]]), !nosanitize [[META2]]
 // CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { i32, i1 } [[TMP0]], 1, 
!nosanitize [[META2]]
-// CHECK-NEXT:    [[TMP2:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 0), !nosanitize [[META2]]
+//
+//                                                                       41 == SO_SignedIntegerOverflow
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 41), !nosanitize [[META2]]
 // CHECK-NEXT:    [[DOTDEMORGAN:%.*]] = and i1 [[TMP1]], [[TMP2]]
-// CHECK-NEXT:    br i1 [[DOTDEMORGAN]], label %[[HANDLER_ADD_OVERFLOW:.*]], 
label %[[CONT:.*]], !prof [[PROF3]], !nosanitize [[META2]]
+// CHECK-NEXT:    br i1 [[DOTDEMORGAN]], label %[[HANDLER_ADD_OVERFLOW:.*]], 
label %[[CONT:.*]], !prof [[PROF4]], !nosanitize [[META2]]
 // CHECK:       [[HANDLER_ADD_OVERFLOW]]:
 // CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[X]] to i64, !nosanitize [[META2]]
 // CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[Y]] to i64, !nosanitize [[META2]]
@@ -134,7 +157,7 @@ int null(int* x) {
 // TR-NEXT:  [[ENTRY:.*:]]
 // TR-NEXT:    [[TMP0:%.*]] = tail call { i32, i1 } 
@llvm.sadd.with.overflow.i32(i32 [[X]], i32 [[Y]]), !nosanitize [[META2]]
 // TR-NEXT:    [[TMP1:%.*]] = extractvalue { i32, i1 } [[TMP0]], 1, 
!nosanitize [[META2]]
-// TR-NEXT:    [[TMP2:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 0), 
!nosanitize [[META2]]
+// TR-NEXT:    [[TMP2:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 41), 
!nosanitize [[META2]]
 // TR-NEXT:    [[DOTDEMORGAN:%.*]] = and i1 [[TMP1]], [[TMP2]]
 // TR-NEXT:    br i1 [[DOTDEMORGAN]], label %[[TRAP:.*]], label %[[CONT:.*]], 
!nosanitize [[META2]]
 // TR:       [[TRAP]]:
@@ -149,9 +172,9 @@ int null(int* x) {
 // REC-NEXT:  [[ENTRY:.*:]]
 // REC-NEXT:    [[TMP0:%.*]] = tail call { i32, i1 } 
@llvm.sadd.with.overflow.i32(i32 [[X]], i32 [[Y]]), !nosanitize [[META2]]
 // REC-NEXT:    [[TMP1:%.*]] = extractvalue { i32, i1 } [[TMP0]], 1, 
!nosanitize [[META2]]
-// REC-NEXT:    [[TMP2:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 0), 
!nosanitize [[META2]]
+// REC-NEXT:    [[TMP2:%.*]] = tail call i1 @llvm.allow.ubsan.check(i8 41), 
!nosanitize [[META2]]
 // REC-NEXT:    [[DOTDEMORGAN:%.*]] = and i1 [[TMP1]], [[TMP2]]
-// REC-NEXT:    br i1 [[DOTDEMORGAN]], label %[[HANDLER_ADD_OVERFLOW:.*]], 
label %[[CONT:.*]], !prof [[PROF3]], !nosanitize [[META2]]
+// REC-NEXT:    br i1 [[DOTDEMORGAN]], label %[[HANDLER_ADD_OVERFLOW:.*]], 
label %[[CONT:.*]], !prof [[PROF4]], !nosanitize [[META2]]
 // REC:       [[HANDLER_ADD_OVERFLOW]]:
 // REC-NEXT:    [[TMP3:%.*]] = zext i32 [[X]] to i64, !nosanitize [[META2]]
 // REC-NEXT:    [[TMP4:%.*]] = zext i32 [[Y]] to i64, !nosanitize [[META2]]
@@ -175,12 +198,14 @@ void use(double*);
 // CHECK-NEXT:    call void @use(ptr noundef nonnull [[VLA]]) #[[ATTR7:[0-9]+]]
 // CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[I]] to i64
 // CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i64 [[TMP0]], [[IDXPROM]]
+//
+//                                                                  71 == SO_LocalBounds
 // CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.allow.ubsan.check(i8 71), !nosanitize [[META2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]], !nosanitize 
[[META2]]
 // CHECK-NEXT:    br i1 [[TMP3]], label %[[TRAP:.*]], label %[[BB4:.*]]
 // CHECK:       [[BB4]]:
 // CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr 
[[VLA]], i64 [[IDXPROM]]
-// CHECK-NEXT:    [[TMP5:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa 
[[TBAA8:![0-9]+]]
+// CHECK-NEXT:    [[TMP5:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa 
[[TBAA9:![0-9]+]]
 // CHECK-NEXT:    ret double [[TMP5]]
 // CHECK:       [[TRAP]]:
 // CHECK-NEXT:    call void @__ubsan_handle_local_out_of_bounds_abort() 
#[[ATTR6]], !nosanitize [[META2]]
@@ -218,7 +243,7 @@ void use(double*);
 // REC-NEXT:    br i1 [[TMP3]], label %[[TRAP:.*]], label %[[BB4:.*]]
 // REC:       [[BB4]]:
 // REC-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[VLA]], 
i64 [[IDXPROM]]
-// REC-NEXT:    [[TMP5:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa 
[[TBAA8:![0-9]+]]
+// REC-NEXT:    [[TMP5:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa 
[[TBAA9:![0-9]+]]
 // REC-NEXT:    ret double [[TMP5]]
 // REC:       [[TRAP]]:
 // REC-NEXT:    call void @__ubsan_handle_local_out_of_bounds() #[[ATTR6]], 
!nosanitize [[META2]]
@@ -232,13 +257,14 @@ double lbounds(int b, int i) {
 
 //.
 // CHECK: [[META2]] = !{}
-// CHECK: [[PROF3]] = !{!"branch_weights", i32 1, i32 1048575}
-// CHECK: [[TBAA4]] = !{[[META5:![0-9]+]], [[META5]], i64 0}
-// CHECK: [[META5]] = !{!"int", [[META6:![0-9]+]], i64 0}
-// CHECK: [[META6]] = !{!"omnipotent char", [[META7:![0-9]+]], i64 0}
-// CHECK: [[META7]] = !{!"Simple C/C++ TBAA"}
-// CHECK: [[TBAA8]] = !{[[META9:![0-9]+]], [[META9]], i64 0}
-// CHECK: [[META9]] = !{!"double", [[META6]], i64 0}
+// CHECK: [[PROF3]] = !{!"branch_weights", i32 1048575, i32 1}
+// CHECK: [[PROF4]] = !{!"branch_weights", i32 1, i32 1048575}
+// CHECK: [[TBAA5]] = !{[[META6:![0-9]+]], [[META6]], i64 0}
+// CHECK: [[META6]] = !{!"int", [[META7:![0-9]+]], i64 0}
+// CHECK: [[META7]] = !{!"omnipotent char", [[META8:![0-9]+]], i64 0}
+// CHECK: [[META8]] = !{!"Simple C/C++ TBAA"}
+// CHECK: [[TBAA9]] = !{[[META10:![0-9]+]], [[META10]], i64 0}
+// CHECK: [[META10]] = !{!"double", [[META7]], i64 0}
 //.
 // TR: [[META2]] = !{}
 // TR: [[TBAA3]] = !{[[META4:![0-9]+]], [[META4]], i64 0}
@@ -249,11 +275,12 @@ double lbounds(int b, int i) {
 // TR: [[META8]] = !{!"double", [[META5]], i64 0}
 //.
 // REC: [[META2]] = !{}
-// REC: [[PROF3]] = !{!"branch_weights", i32 1, i32 1048575}
-// REC: [[TBAA4]] = !{[[META5:![0-9]+]], [[META5]], i64 0}
-// REC: [[META5]] = !{!"int", [[META6:![0-9]+]], i64 0}
-// REC: [[META6]] = !{!"omnipotent char", [[META7:![0-9]+]], i64 0}
-// REC: [[META7]] = !{!"Simple C/C++ TBAA"}
-// REC: [[TBAA8]] = !{[[META9:![0-9]+]], [[META9]], i64 0}
-// REC: [[META9]] = !{!"double", [[META6]], i64 0}
+// REC: [[PROF3]] = !{!"branch_weights", i32 1048575, i32 1}
+// REC: [[PROF4]] = !{!"branch_weights", i32 1, i32 1048575}
+// REC: [[TBAA5]] = !{[[META6:![0-9]+]], [[META6]], i64 0}
+// REC: [[META6]] = !{!"int", [[META7:![0-9]+]], i64 0}
+// REC: [[META7]] = !{!"omnipotent char", [[META8:![0-9]+]], i64 0}
+// REC: [[META8]] = !{!"Simple C/C++ TBAA"}
+// REC: [[TBAA9]] = !{[[META10:![0-9]+]], [[META10]], i64 0}
+// REC: [[META10]] = !{!"double", [[META7]], i64 0}
 //.

>From 5a85798f637f66e08fe751857e42161907435f60 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurs...@google.com>
Date: Tue, 28 Jan 2025 20:23:51 +0000
Subject: [PATCH 2/5] Don't clamp to 1000000

---
 clang/lib/CodeGen/BackendUtil.cpp | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 104972e213adae..1d796fc2e3e1f6 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -812,14 +812,13 @@ static void addSanitizers(const Triple &TargetTriple,
       LowerAllowCheckPass::Options Opts;
 
       // SanitizeSkipHotCutoffs stores doubles with range [0, 1]
-      // Opts.cutoffs wants ints with range [0, 999999]
+      // Opts.cutoffs wants ints with range [0, 1000000]
       for (unsigned int i = 0; i < SanitizerKind::SO_Count; ++i) {
         std::optional<double> maybeCutoff =
             CodeGenOpts.SanitizeSkipHotCutoffs[i];
         if (maybeCutoff.has_value() &&
             (maybeCutoff.value() > SanitizerMaskCutoffsEps)) {
-          Opts.cutoffs.push_back(
-              std::clamp((int)(maybeCutoff.value() * 1000000), 0, 999999));
+          Opts.cutoffs.push_back(maybeCutoff.value() * 1000000);
         } else {
           // Default is don't skip the check
           Opts.cutoffs.push_back(0);

>From c2e8f0461ad07f4f5733c408cd243aedf10822c6 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurs...@google.com>
Date: Tue, 28 Jan 2025 22:43:39 +0000
Subject: [PATCH 3/5] Simplify optional and epsilon handling

---
 clang/lib/CodeGen/BackendUtil.cpp | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 1d796fc2e3e1f6..351e13c3c829d0 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -797,11 +797,9 @@ static void addSanitizers(const Triple &TargetTriple,
 
   bool lowerAllowCheck = LowerAllowCheckPass::IsRequested();
   // Is there a non-zero cutoff?
-  static const double SanitizerMaskCutoffsEps = 0.000000001f;
+  static constexpr double SanitizerMaskCutoffsEps = 0.000000001f;
   for (unsigned int i = 0; i < SanitizerKind::SO_Count; ++i) {
-    std::optional<double> maybeCutoff = CodeGenOpts.SanitizeSkipHotCutoffs[i];
-    lowerAllowCheck |= (maybeCutoff.has_value() &&
-                        (maybeCutoff.value() > SanitizerMaskCutoffsEps));
+    lowerAllowCheck |= (CodeGenOpts.SanitizeSkipHotCutoffs[i].value_or(0) > SanitizerMaskCutoffsEps);
   }
 
   if (lowerAllowCheck) {
@@ -811,18 +809,11 @@ static void addSanitizers(const Triple &TargetTriple,
                                             ThinOrFullLTOPhase Phase) {
       LowerAllowCheckPass::Options Opts;
 
-      // SanitizeSkipHotCutoffs stores doubles with range [0, 1]
-      // Opts.cutoffs wants ints with range [0, 1000000]
+      // SanitizeSkipHotCutoffs: doubles with range [0, 1]
+      // Opts.cutoffs: ints with range [0, 1000000]
+      static_assert(static_cast<int>(SanitizerMaskCutoffsEps * 1000000) == 0);
       for (unsigned int i = 0; i < SanitizerKind::SO_Count; ++i) {
-        std::optional<double> maybeCutoff =
-            CodeGenOpts.SanitizeSkipHotCutoffs[i];
-        if (maybeCutoff.has_value() &&
-            (maybeCutoff.value() > SanitizerMaskCutoffsEps)) {
-          Opts.cutoffs.push_back(maybeCutoff.value() * 1000000);
-        } else {
-          // Default is don't skip the check
-          Opts.cutoffs.push_back(0);
-        }
+        Opts.cutoffs.push_back(CodeGenOpts.SanitizeSkipHotCutoffs[i].value_or(0) * 1000000);
       }
 
       MPM.addPass(createModuleToFunctionPassAdaptor(LowerAllowCheckPass(Opts)));

>From d11b1447f6881e9c1453d07ebd3dbe8374e5d2f4 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurs...@google.com>
Date: Tue, 28 Jan 2025 23:25:09 +0000
Subject: [PATCH 4/5] clang-format

---
 clang/lib/CodeGen/BackendUtil.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 351e13c3c829d0..b699d4f9611ae2 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -799,7 +799,8 @@ static void addSanitizers(const Triple &TargetTriple,
   // Is there a non-zero cutoff?
   static constexpr double SanitizerMaskCutoffsEps = 0.000000001f;
   for (unsigned int i = 0; i < SanitizerKind::SO_Count; ++i) {
-    lowerAllowCheck |= (CodeGenOpts.SanitizeSkipHotCutoffs[i].value_or(0) > SanitizerMaskCutoffsEps);
+    lowerAllowCheck |= (CodeGenOpts.SanitizeSkipHotCutoffs[i].value_or(0) >
+                        SanitizerMaskCutoffsEps);
   }
 
   if (lowerAllowCheck) {
@@ -813,7 +814,8 @@ static void addSanitizers(const Triple &TargetTriple,
       // Opts.cutoffs: ints with range [0, 1000000]
       static_assert(static_cast<int>(SanitizerMaskCutoffsEps * 1000000) == 0);
       for (unsigned int i = 0; i < SanitizerKind::SO_Count; ++i) {
-        Opts.cutoffs.push_back(CodeGenOpts.SanitizeSkipHotCutoffs[i].value_or(0) * 1000000);
+        Opts.cutoffs.push_back(
+            CodeGenOpts.SanitizeSkipHotCutoffs[i].value_or(0) * 1000000);
       }
 
       MPM.addPass(createModuleToFunctionPassAdaptor(LowerAllowCheckPass(Opts)));

>From 9a3ff427202d1e4cf089732c73d07c2fc90d83da Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurs...@google.com>
Date: Wed, 29 Jan 2025 18:51:42 +0000
Subject: [PATCH 5/5] Address Vitaly's feedback

---
 clang/include/clang/Basic/Sanitizers.h |  4 ++++
 clang/lib/Basic/Sanitizers.cpp         | 18 +++++++++++++++++
 clang/lib/CodeGen/BackendUtil.cpp      | 27 +++++++++++++-------------
 clang/lib/CodeGen/CGExpr.cpp           |  4 +---
 4 files changed, 36 insertions(+), 17 deletions(-)

diff --git a/clang/include/clang/Basic/Sanitizers.h b/clang/include/clang/Basic/Sanitizers.h
index fc0576d452b17f..7ca3fef3e6eefa 100644
--- a/clang/include/clang/Basic/Sanitizers.h
+++ b/clang/include/clang/Basic/Sanitizers.h
@@ -162,6 +162,10 @@ class SanitizerMaskCutoffs {
 
   void set(SanitizerMask K, double V);
   void clear(SanitizerMask K = SanitizerKind::All);
+
+  // Returns nullopt if all the values are zero.
+  // Otherwise, the return value contains a vector of all the scaled values.
+  std::optional<std::vector<int>> getAllScaled(int ScalingFactor) const;
 };
 
 struct SanitizerSet {
diff --git a/clang/lib/Basic/Sanitizers.cpp b/clang/lib/Basic/Sanitizers.cpp
index 5b9b88d032702f..a7d0cc73f1ba77 100644
--- a/clang/lib/Basic/Sanitizers.cpp
+++ b/clang/lib/Basic/Sanitizers.cpp
@@ -18,6 +18,7 @@
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
 #include <algorithm>
+#include <cmath>
 #include <optional>
 
 using namespace clang;
@@ -43,6 +44,23 @@ std::optional<double> SanitizerMaskCutoffs::operator[](unsigned Kind) const {
 
 void SanitizerMaskCutoffs::clear(SanitizerMask K) { set(K, 0); }
 
+std::optional<std::vector<int>> SanitizerMaskCutoffs::getAllScaled(int ScalingFactor) const {
+    std::vector<int> scaledCutoffs;
+
+    bool anyNonZero = false;
+    for (unsigned int i = 0; i < SanitizerKind::SO_Count; ++i) {
+      int scaled = round((operator[](i)).value_or(0) * ScalingFactor);
+      scaledCutoffs.push_back(scaled);
+      anyNonZero |= (scaled != 0);
+    }
+
+    if (anyNonZero)
+      return scaledCutoffs;
+
+    return std::nullopt;
+}
+
+
 // Once LLVM switches to C++17, the constexpr variables can be inline and we
 // won't need this.
 #define SANITIZER(NAME, ID) constexpr SanitizerMask SanitizerKind::ID;
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index b699d4f9611ae2..ef410fb53c94d1 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -795,27 +795,26 @@ static void addSanitizers(const Triple &TargetTriple,
     PB.registerOptimizerLastEPCallback(SanitizersCallback);
   }
 
-  bool lowerAllowCheck = LowerAllowCheckPass::IsRequested();
-  // Is there a non-zero cutoff?
-  static constexpr double SanitizerMaskCutoffsEps = 0.000000001f;
-  for (unsigned int i = 0; i < SanitizerKind::SO_Count; ++i) {
-    lowerAllowCheck |= (CodeGenOpts.SanitizeSkipHotCutoffs[i].value_or(0) >
-                        SanitizerMaskCutoffsEps);
-  }
+  // SanitizeSkipHotCutoffs: doubles with range [0, 1]
+  // Opts.cutoffs: ints with range [0, 1000000]
+  std::optional<std::vector<int>> scaledCutoffs = CodeGenOpts.SanitizeSkipHotCutoffs.getAllScaled(1000000);
 
-  if (lowerAllowCheck) {
+  // TODO: remove IsRequested()
+  if (LowerAllowCheckPass::IsRequested() || scaledCutoffs.has_value()) {
     // We want to call it after inline, which is about OptimizerEarlyEPCallback.
     PB.registerOptimizerEarlyEPCallback([&](ModulePassManager &MPM,
                                             OptimizationLevel Level,
                                             ThinOrFullLTOPhase Phase) {
       LowerAllowCheckPass::Options Opts;
 
-      // SanitizeSkipHotCutoffs: doubles with range [0, 1]
-      // Opts.cutoffs: ints with range [0, 1000000]
-      static_assert(static_cast<int>(SanitizerMaskCutoffsEps * 1000000) == 0);
-      for (unsigned int i = 0; i < SanitizerKind::SO_Count; ++i) {
-        Opts.cutoffs.push_back(
-            CodeGenOpts.SanitizeSkipHotCutoffs[i].value_or(0) * 1000000);
+      if (scaledCutoffs.has_value()) {
+        // Copy from std::vector<int> to std::vector<unsigned int>
+        Opts.cutoffs = {scaledCutoffs.value().begin(), scaledCutoffs.value().end()};
+      } else {
+        // TODO: remove this after we remove IsRequested()
+        for (unsigned int i = 0; i < SanitizerKind::SO_Count; ++i) {
+          Opts.cutoffs.push_back(0);
+        }
       }
 
       MPM.addPass(createModuleToFunctionPassAdaptor(LowerAllowCheckPass(Opts)));
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 0a9cae8c2f02ae..bf8df2789f58db 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -3620,12 +3620,10 @@ void CodeGenFunction::EmitCheck(
   // specified cutoffs.
   // This expression looks expensive but will be simplified after
   // LowerAllowCheckPass.
-  static const double SanitizerMaskCutoffsEps = 0.000000001f;
   for (auto &[Check, Ord] : Checked) {
     llvm::Value *GuardedCheck = Check;
     if (ClSanitizeGuardChecks ||
-        (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] >
-         SanitizerMaskCutoffsEps)) {
+        (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] > 0)) {
       llvm::Value *Allow = Builder.CreateCall(
           CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
           llvm::ConstantInt::get(CGM.Int8Ty, Ord));
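
For completeness, a hedged usage sketch of the SanitizerMaskCutoffs::getAllScaled
helper added in this revision (the surrounding function and the cutoff value are
illustrative, not code from the patch):

  #include "clang/Basic/Sanitizers.h"

  #include <optional>
  #include <vector>

  // Illustrative only: populate cutoffs the way -fsanitize-skip-hot-cutoff
  // parsing would, then scale them for LowerAllowCheckPass.
  std::optional<std::vector<int>> exampleScaledCutoffs() {
    clang::SanitizerMaskCutoffs Cutoffs;
    Cutoffs.set(clang::SanitizerKind::SignedIntegerOverflow, 0.000001);
    // getAllScaled returns std::nullopt when every scaled value is zero;
    // otherwise one entry per sanitizer ordinal, scaled by the given factor.
    return Cutoffs.getAllScaled(1000000);
  }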
