llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT--> @llvm/pr-subscribers-compiler-rt-sanitizer Author: Vitaly Buka (vitalybuka) <details> <summary>Changes</summary> Locking StackDepotBase fully is very expensive, as 2^20 buckets need to be locked. Not locking, and unlocking only the buckets that need to be unlocked to avoid deadlocks, increases the chance of a data race, where the same item can be inserted into the table twice. However, this only results in a small amount of additional memory usage by the forked process. --- Full diff: https://github.com/llvm/llvm-project/pull/76280.diff 4 Files Affected: - (modified) compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp (+5-1) - (modified) compiler-rt/lib/msan/msan_chained_origin_depot.cpp (+7-2) - (modified) compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp (+12-2) - (modified) compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h (+2-1) ``````````diff diff --git a/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp b/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp index 6644bd6a7c6c0c..d078265258ca14 100644 --- a/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp +++ b/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp @@ -19,9 +19,13 @@ static ChainedOriginDepot chainedOriginDepot; ChainedOriginDepot* GetChainedOriginDepot() { return &chainedOriginDepot; } -void ChainedOriginDepotLockBeforeFork() { chainedOriginDepot.LockAll(); } +void ChainedOriginDepotLockBeforeFork() { +  // TODO: Consider same optimization as `StackDepotLockBeforeFork`. +  chainedOriginDepot.LockAll(); +} void ChainedOriginDepotUnlockAfterFork(bool fork_child) { +  // TODO: Consider same optimization as `StackDepotUnlockAfterFork`. 
chainedOriginDepot.UnlockAll(); } diff --git a/compiler-rt/lib/msan/msan_chained_origin_depot.cpp b/compiler-rt/lib/msan/msan_chained_origin_depot.cpp index c3bd54141e6c38..66c80708669373 100644 --- a/compiler-rt/lib/msan/msan_chained_origin_depot.cpp +++ b/compiler-rt/lib/msan/msan_chained_origin_depot.cpp @@ -31,10 +31,15 @@ u32 ChainedOriginDepotGet(u32 id, u32 *other) { return chainedOriginDepot.Get(id, other); } -void ChainedOriginDepotBeforeFork() { chainedOriginDepot.LockAll(); } +void ChainedOriginDepotBeforeFork() { + // Don't `chainedOriginDepot.LockAll()`, see `StackDepotLockBeforeFork`. +} void ChainedOriginDepotAfterFork(bool fork_child) { - chainedOriginDepot.UnlockAll(); + // See `StackDepotUnlockAfterFork`. + if (fork_child) { + chainedOriginDepot.UnlockAll(); + } } } // namespace __msan diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp index ce21f3c178bce0..8d134ca5a41aee 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp @@ -216,7 +216,13 @@ StackTrace StackDepotGet(u32 id) { } void StackDepotLockBeforeFork() { - theDepot.LockAll(); + // Do not `theDepot.LockAll()`. It's very slow, but not rely needed. The + // parent process will neither lock nor unlock. Child process risks to be + // deadlocked on already locked buckets. To avoid deadlock we will unlock + // every locked buckets. This may affect consistency of the hash table, but + // the only possible issue is a few items inserted by parent process will be + // not found by child, and the child may insert them again, wasting some space + // in `stackStore`. 
compress_thread.LockAndStop(); stackStore.LockAll(); } @@ -224,7 +230,11 @@ void StackDepotLockBeforeFork() { void StackDepotUnlockAfterFork(bool fork_child) { stackStore.UnlockAll(); compress_thread.Unlock(); - theDepot.UnlockAll(); + if (fork_child) { + // Only child process needs to unlock to avoid deadlock. See + // `StackDepotLockAll`. + theDepot.UnlockAll(); + } } void StackDepotPrintAll() { diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h index 96d1ddc87fd032..3440a8f628e971 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h @@ -171,7 +171,8 @@ void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() { for (int i = 0; i < kTabSize; ++i) { atomic_uint32_t *p = &tab[i]; uptr s = atomic_load(p, memory_order_relaxed); - unlock(p, s & kUnlockMask); + if (s & kLockMask) + unlock(p, s & kUnlockMask); } } `````````` </details> https://github.com/llvm/llvm-project/pull/76280 _______________________________________________ llvm-branch-commits mailing list llvm-branch-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits