On Mon, Jan 12, 2015 at 09:55:15PM +0100, Jakub Jelinek wrote:
> > specific changes from there.
> 
> Yes, I'll try to cherry-pick those tomorrow.
> 
> > I am especially interested in fixing these two issues, but there may be
> > other important improvements too:
> > 
> > 
> > https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64350 : TSAN fails after
> > stress-testing for a while
> > 
> > was fixed by
> > 
> > http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_deadlock_detector.h?r1=224518&r2=224517&pathrev=224518
> > http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_deadlock_detector.h?r1=224519&r2=224518&pathrev=224519
> > 
> > 
> > 
> > https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63251 : tsan: corrupted shadow
> > stack
> > 
> > was fixed by
> > 
> > http://llvm.org/viewvc/llvm-project?view=revision&revision=224702
> > http://llvm.org/viewvc/llvm-project?view=revision&revision=224834
So do you mean the following?  I've bootstrapped/regtested on x86_64-linux
and i686-linux, but haven't tried any special testcases on it.
If it works for you, I'll commit it.

2015-01-13  Jakub Jelinek  <ja...@redhat.com>

	* sanitizer_common/sanitizer_deadlock_detector.h: Cherry pick upstream
	r224518 and r224519.
	* tsan/tsan_rtl_thread.cc: Cherry pick upstream r224702 and r224834.

--- libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h.jj	2014-05-22 10:18:14.893626024 +0200
+++ libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h	2015-01-13 14:07:40.096414924 +0100
@@ -48,6 +48,8 @@ class DeadlockDetectorTLS {
     if (epoch_ == current_epoch) return;
     bv_.clear();
     epoch_ = current_epoch;
+    n_recursive_locks = 0;
+    n_all_locks_ = 0;
   }
 
   uptr getEpoch() const { return epoch_; }
@@ -81,7 +83,8 @@ class DeadlockDetectorTLS {
       }
     }
     // Printf("remLock: %zx %zx\n", lock_id, epoch_);
-    CHECK(bv_.clearBit(lock_id));
+    if (!bv_.clearBit(lock_id))
+      return;  // probably addLock happened before flush
     if (n_all_locks_) {
       for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
         if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
@@ -173,6 +176,7 @@ class DeadlockDetector {
     recycled_nodes_.clear();
     available_nodes_.setAll();
     g_.clear();
+    n_edges_ = 0;
     return getAvailableNode(data);
   }
 
--- libsanitizer/tsan/tsan_rtl_thread.cc.jj	2014-09-24 11:08:03.824028080 +0200
+++ libsanitizer/tsan/tsan_rtl_thread.cc	2015-01-13 14:08:06.167954292 +0100
@@ -109,12 +109,13 @@ void ThreadContext::OnStarted(void *arg)
     thr->dd_pt = ctx->dd->CreatePhysicalThread();
     thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
   }
+  thr->fast_state.SetHistorySize(flags()->history_size);
+  // Commit switch to the new part of the trace.
+  // TraceAddEvent will reset stack0/mset0 in the new part for us.
+  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+  thr->fast_synch_epoch = epoch0;
   AcquireImpl(thr, 0, &sync);
-  thr->fast_state.SetHistorySize(flags()->history_size);
-  const uptr trace = (epoch0 / kTracePartSize) % TraceParts();
-  Trace *thr_trace = ThreadTrace(thr->tid);
-  thr_trace->headers[trace].epoch0 = epoch0;
   StatInc(thr, StatSyncAcquire);
   sync.Reset(&thr->clock_cache);
   DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "

	Jakub
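For readers without the sanitizer sources at hand, here is a minimal, self-contained
sketch of the per-thread epoch-reset pattern the first hunk tightens.  This is not the
sanitizer code itself; the class and member names (ToyThreadLockState, n_locks_) are
invented purely for illustration.  The point it demonstrates: when the global epoch
advances, all thread-local lock bookkeeping must be dropped, not just one container,
or stale entries survive the flush.

#include <cassert>
#include <cstddef>

// Toy stand-in for per-thread deadlock-detector state that is only valid
// for one global epoch and must be fully reset when the epoch changes.
class ToyThreadLockState {
 public:
  void ensureCurrentEpoch(std::size_t current_epoch) {
    if (epoch_ == current_epoch) return;
    epoch_ = current_epoch;
    // Reset everything tied to the old epoch.  Clearing only part of the
    // state (the shape of the bug fixed upstream) leaves stale counts behind.
    n_locks_ = 0;
  }

  void addLock(std::size_t current_epoch) {
    ensureCurrentEpoch(current_epoch);
    ++n_locks_;
  }

  std::size_t locks() const { return n_locks_; }

 private:
  std::size_t epoch_ = 0;
  std::size_t n_locks_ = 0;
};

int main() {
  ToyThreadLockState tls;
  tls.addLock(1);
  tls.addLock(1);
  assert(tls.locks() == 2);
  tls.addLock(2);            // epoch advanced: old entries must not survive
  assert(tls.locks() == 1);  // fails if ensureCurrentEpoch forgets the reset
  return 0;
}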