https://github.com/rymrg updated 
https://github.com/llvm/llvm-project/pull/145540

>From 89b3a5541fadc69b721d584a95d695e809eb1f78 Mon Sep 17 00:00:00 2001
From: rymrg <54061433+ry...@users.noreply.github.com>
Date: Mon, 23 Jun 2025 21:55:32 +0300
Subject: [PATCH 1/2] RSan: https://doi.org/10.1145/3729277

Without value support
---
 compiler-rt/lib/tsan/rtl/CMakeLists.txt       |   7 +
 compiler-rt/lib/tsan/rtl/rsan.cpp             |   8 +
 compiler-rt/lib/tsan/rtl/rsan_action.hpp      |  97 +++
 compiler-rt/lib/tsan/rtl/rsan_arena.hpp       |  45 ++
 compiler-rt/lib/tsan/rtl/rsan_defs.hpp        |  90 +++
 compiler-rt/lib/tsan/rtl/rsan_dense_map.h     | 714 ++++++++++++++++++
 compiler-rt/lib/tsan/rtl/rsan_instrument.hpp  | 358 +++++++++
 compiler-rt/lib/tsan/rtl/rsan_lock.hpp        |  33 +
 compiler-rt/lib/tsan/rtl/rsan_map.hpp         |  88 +++
 compiler-rt/lib/tsan/rtl/rsan_memoryorder.hpp |  65 ++
 compiler-rt/lib/tsan/rtl/rsan_report.cpp      |  72 ++
 compiler-rt/lib/tsan/rtl/rsan_report.hpp      |  85 +++
 .../lib/tsan/rtl/rsan_robustnessmodel.hpp     | 286 +++++++
 compiler-rt/lib/tsan/rtl/rsan_stacktrace.cpp  | 134 ++++
 compiler-rt/lib/tsan/rtl/rsan_stacktrace.hpp  |  92 +++
 compiler-rt/lib/tsan/rtl/rsan_vector.h        | 178 +++++
 compiler-rt/lib/tsan/rtl/rsan_vectorclock.hpp | 115 +++
 compiler-rt/lib/tsan/rtl/tsan_flags.inc       |   3 +
 .../lib/tsan/rtl/tsan_interface_atomic.cpp    | 107 ++-
 compiler-rt/lib/tsan/rtl/tsan_mman.cpp        |   8 +
 compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp   |  11 +
 21 files changed, 2569 insertions(+), 27 deletions(-)
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan.cpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_action.hpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_arena.hpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_defs.hpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_dense_map.h
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_instrument.hpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_lock.hpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_map.hpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_memoryorder.hpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_report.cpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_report.hpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_robustnessmodel.hpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_stacktrace.cpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_stacktrace.hpp
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_vector.h
 create mode 100644 compiler-rt/lib/tsan/rtl/rsan_vectorclock.hpp

diff --git a/compiler-rt/lib/tsan/rtl/CMakeLists.txt b/compiler-rt/lib/tsan/rtl/CMakeLists.txt
index d7d84706bfd58..eb5f4a84fa359 100644
--- a/compiler-rt/lib/tsan/rtl/CMakeLists.txt
+++ b/compiler-rt/lib/tsan/rtl/CMakeLists.txt
@@ -49,6 +49,9 @@ set(TSAN_SOURCES
   tsan_symbolize.cpp
   tsan_sync.cpp
   tsan_vector_clock.cpp
+  rsan.cpp
+  rsan_report.cpp
+  rsan_stacktrace.cpp
   )
 
 set(TSAN_CXX_SOURCES
@@ -59,6 +62,10 @@ set(TSAN_PREINIT_SOURCES
   tsan_preinit.cpp
   )
 
+set_source_files_properties(tsan_interface_atomic.cpp PROPERTIES COMPILE_FLAGS -std=c++20)
+set_source_files_properties(tsan_mman.cpp PROPERTIES COMPILE_FLAGS -std=c++20)
+set_source_files_properties(tsan_rtl_mutex.cpp PROPERTIES COMPILE_FLAGS -std=c++20)
+
 if(APPLE)
   list(APPEND TSAN_SOURCES
     tsan_interceptors_mac.cpp
diff --git a/compiler-rt/lib/tsan/rtl/rsan.cpp b/compiler-rt/lib/tsan/rtl/rsan.cpp
new file mode 100644
index 0000000000000..fb696eb277b98
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan.cpp
@@ -0,0 +1,8 @@
+#include "rsan_vectorclock.hpp"
+#include "rsan_robustnessmodel.hpp"
+#include "rsan_instrument.hpp"
+#include "rsan_map.hpp"
+#include "rsan_arena.hpp"
+
+namespace Robustness{
+} // namespace Robustness
diff --git a/compiler-rt/lib/tsan/rtl/rsan_action.hpp b/compiler-rt/lib/tsan/rtl/rsan_action.hpp
new file mode 100644
index 0000000000000..a066b4e6ea8fc
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_action.hpp
@@ -0,0 +1,97 @@
+#pragma once
+#include "rsan_defs.hpp"
+namespace Robustness::Action{
+       struct StoreAction{
+               ThreadId tid;
+               Address addr;
+               int size;
+       };
+       struct LoadAction{
+               ThreadId tid;
+               Address addr;
+               int size;
+       };
+       struct AtomicVerifyAction{
+               ThreadId tid;
+               Address addr;
+               morder mo;
+               int size;
+       };
+       struct AtomicVerifyStoreAction{
+               ThreadId tid;
+               Address addr;
+               morder mo;
+               int size;
+       };
+       struct AtomicLoadAction{
+               ThreadId tid;
+               Address addr;
+               morder mo;
+               int size;
+               bool rmw;
+               DebugInfo dbg;
+       };
+       struct AtomicStoreAction{
+               ThreadId tid;
+               Address addr;
+               morder mo;
+               int size;
+               uint64_t oldValue;
+               uint64_t newValue;
+               DebugInfo dbg;
+       };
+       struct AtomicRMWAction{
+               ThreadId tid;
+               Address addr;
+               morder mo;
+               int size;
+               uint64_t oldValue;
+               uint64_t newValue;
+               DebugInfo dbg;
+       };
+       struct AtomicCasAction{
+               ThreadId tid;
+               Address addr;
+               morder mo;
+               int size;
+               uint64_t oldValue;
+               uint64_t newValue;
+               bool success;
+               DebugInfo dbg;
+       };
+       struct FenceAction{
+               ThreadId tid;
+               morder mo;
+       };
+       struct TrackAction{
+               ThreadId tid;
+               Address addr;
+               uint64_t value;
+       };
+       struct WaitAction{
+               ThreadId tid;
+               Address addr;
+               uint64_t value;
+               DebugInfo dbg;
+       };
+       struct BcasAction{
+               ThreadId tid;
+               Address addr;
+               uint64_t value;
+               DebugInfo dbg;
+       };
+       struct ThreadCreate{
+               ThreadId creator, createe;
+       };
+       struct ThreadJoin{
+               ThreadId absorber, absorbee;
+       };
+       struct Free{
+               ThreadId tid;
+               Address addr;
+               uptr size;
+               DebugInfo dbg;
+       };
+}
+
+
diff --git a/compiler-rt/lib/tsan/rtl/rsan_arena.hpp b/compiler-rt/lib/tsan/rtl/rsan_arena.hpp
new file mode 100644
index 0000000000000..95fe3c5229942
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_arena.hpp
@@ -0,0 +1,45 @@
+#pragma once
+#include "rsan_vector.h"
+#include "rsan_defs.hpp"
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+namespace Robustness {
+       template< class T >
+               class Arena {
+
+                       //const FACTOR = 2;
+                       static const u8 BASE = 8;
+
+                       u64 cv = 0;
+                       u64 ci = 0;
+
+                       Vector<Vector<T>> vs;
+                       Arena(const Arena&) = delete;
+
+
+                       public:
+                       Arena() = default;
+                       ~Arena() {
+                               for (auto& v : vs)
+                                       v.clear();
+                       }
+
+                       T* allocate(){
+                               if (cv == vs.size()){
+                                       vs.push_back();
+                                       vs[cv].resize(BASE << (cv));
+                                       ci = 0;
+                               }
+                               DCHECK_GT(vs.size(), cv);
+                               DCHECK_GT(vs[cv].size(), ci);
+                               auto ret = &vs[cv][ci++];
+                               DCHECK_GT(ret, 0);
+                               if (ci >= vs[cv].size()){
+                                       ++cv;
+                               }
+
+                               new (ret) T();
+                               return ret;
+                       }
+               };
+} // namespace Robustness
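Review aid, not part of the patch: Arena::allocate above hands out slots from blocks whose sizes double (block cv holds BASE << cv = 8, 16, 32, ... elements), so pointers it has already returned are never invalidated. A minimal standalone sketch of the same policy, assuming BASE == 8 and using std::deque/std::vector plus a hypothetical ArenaSketch name in place of the sanitizer types:

    // Standalone sketch of the Arena growth policy from rsan_arena.hpp.
    #include <cstddef>
    #include <deque>
    #include <vector>

    template <class T>
    class ArenaSketch {
      static const std::size_t BASE = 8;
      std::size_t cv = 0, ci = 0;          // current block / current index
      std::deque<std::vector<T>> blocks;   // block cv holds BASE << cv slots

    public:
      T *allocate() {
        if (cv == blocks.size()) {         // need a new, twice-as-large block
          blocks.emplace_back();
          blocks[cv].resize(BASE << cv);
          ci = 0;
        }
        T *ret = &blocks[cv][ci++];
        if (ci >= blocks[cv].size())       // block exhausted: advance to the next one
          ++cv;
        return ret;                        // stays valid: existing blocks never reallocate
      }
    };

    int main() {
      ArenaSketch<int> arena;
      int *first = arena.allocate();
      for (int i = 0; i < 100; ++i) arena.allocate();
      return *first == 0 ? 0 : 1;          // slots are value-initialized here,
    }                                      // the real Arena placement-news T() instead

The real allocator additionally runs the placement new on every allocation, which the sketch replaces with value initialization.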
diff --git a/compiler-rt/lib/tsan/rtl/rsan_defs.hpp b/compiler-rt/lib/tsan/rtl/rsan_defs.hpp
new file mode 100644
index 0000000000000..c5ca506865090
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_defs.hpp
@@ -0,0 +1,90 @@
+#pragma once
+#include "tsan_defs.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+
+//class __tsan::ThreadState;
+
+namespace Robustness{
+       using __tsan::s8;
+       using __tsan::u8;
+       using __tsan::s16;
+       using __tsan::u16;
+       using __tsan::s32;
+       using __tsan::u32;
+       using __tsan::s64;
+       using __tsan::u64;
+       using __tsan::uptr;
+       typedef s64 timestamp_t;
+       typedef s64 ssize_t;
+       typedef u64 uint64_t;
+       typedef s64 int64_t;
+       typedef __PTRDIFF_TYPE__ ptrdiff_t;
+       typedef __SIZE_TYPE__ size_t;
+
+       typedef u8 uint8_t;
+
+       typedef u64 Address;
+       typedef u64 LocationId;
+       
+       typedef u32 ThreadId;
+
+       using __tsan::InternalScopedString;
+
+       using __tsan::flags;
+
+       using __sanitizer::IsAligned;
+
+       using __sanitizer::LowLevelAllocator;
+       using __sanitizer::InternalAlloc;
+       using __sanitizer::InternalFree;
+       using __sanitizer::internal_memcpy;
+       using __sanitizer::internal_memmove;
+       using __sanitizer::internal_memset;
+       using __sanitizer::RoundUpTo;
+       using __sanitizer::RoundUpToPowerOfTwo;
+       using __sanitizer::GetPageSizeCached;
+       using __sanitizer::MostSignificantSetBitIndex;
+       using __sanitizer::MmapOrDie;
+       using __sanitizer::UnmapOrDie;
+       using __sanitizer::Max;
+       using __sanitizer::Swap;
+       using __sanitizer::forward;
+       using __sanitizer::move;
+
+       using __sanitizer::Printf;
+       using __sanitizer::Report;
+
+       using __sanitizer::Lock;
+       using __sanitizer::Mutex;
+
+       template <typename T1, typename T2>
+       struct Pair{
+               T1 first;
+               T2 second;
+       };
+       template <typename T1, typename T2>
+       auto pair(T1 fst, T2 snd){
+               return Pair<T1, T2>{fst, snd};
+       }
+
+       using __tsan::max;
+       using __tsan::min;
+
+       enum class ViolationType{
+               read, write,
+       };
+
+       struct DebugInfo {
+               __tsan::ThreadState* thr = nullptr;
+               uptr pc = 0xDEADBEEF;
+       };
+
+       template<class>
+               inline constexpr bool always_false_v = false;
+
+       inline bool isRobustness() {
+               return __tsan::flags()->enable_robustness;
+       }
+} //  namespace Robustness
diff --git a/compiler-rt/lib/tsan/rtl/rsan_dense_map.h b/compiler-rt/lib/tsan/rtl/rsan_dense_map.h
new file mode 100644
index 0000000000000..57775613ed00b
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_dense_map.h
@@ -0,0 +1,714 @@
+//===- rsan_dense_map.h - Dense probed hash table ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a fork of the llvm/ADT/DenseMap.h class with the following changes:
+//  * Use mmap to allocate.
+//  * No iterators.
+//  * Does not shrink.
+//
+//===----------------------------------------------------------------------===//
+
+#pragma once
+
+#include "rsan_defs.hpp"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_dense_map_info.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_type_traits.h"
+
+namespace Robustness {
+       namespace detail{
+               using __sanitizer::detail::DenseMapPair;
+               using __sanitizer::detail::combineHashValue;
+       } // namespace detail
+       using __sanitizer::DenseMapInfo;
+
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+          typename BucketT>
+class DenseMapBase {
+ public:
+  using size_type = unsigned;
+  using key_type = KeyT;
+  using mapped_type = ValueT;
+  using value_type = BucketT;
+
+  WARN_UNUSED_RESULT bool empty() const { return getNumEntries() == 0; }
+  unsigned size() const { return getNumEntries(); }
+
+  /// Grow the densemap so that it can contain at least \p NumEntries items
+  /// before resizing again.
+  void reserve(size_type NumEntries) {
+    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
+    if (NumBuckets > getNumBuckets())
+      grow(NumBuckets);
+  }
+
+  void clear() {
+    if (getNumEntries() == 0 && getNumTombstones() == 0)
+      return;
+
+    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+    if (__sanitizer::is_trivially_destructible<ValueT>::value) {
+      // Use a simpler loop when values don't need destruction.
+      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
+        P->getFirst() = EmptyKey;
+    } else {
+      unsigned NumEntries = getNumEntries();
+      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
+          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+            P->getSecond().~ValueT();
+            --NumEntries;
+          }
+          P->getFirst() = EmptyKey;
+        }
+      }
+      CHECK_EQ(NumEntries, 0);
+    }
+    setNumEntries(0);
+    setNumTombstones(0);
+  }
+
+  /// Return 1 if the specified key is in the map, 0 otherwise.
+  size_type count(const KeyT &Key) const {
+    const BucketT *TheBucket;
+    return LookupBucketFor(Key, TheBucket) ? 1 : 0;
+  }
+
+  value_type *find(const KeyT &Key) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return TheBucket;
+    return nullptr;
+  }
+  const value_type *find(const KeyT &Key) const {
+    const BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return TheBucket;
+    return nullptr;
+  }
+  bool contains(const KeyT &Key) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return true;
+    return false;
+  }
+
+  /// Alternate version of find() which allows a different, and possibly
+  /// less expensive, key type.
+  /// The DenseMapInfo is responsible for supplying methods
+  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+  /// type used.
+  template <class LookupKeyT>
+  value_type *find_as(const LookupKeyT &Key) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return TheBucket;
+    return nullptr;
+  }
+  template <class LookupKeyT>
+  const value_type *find_as(const LookupKeyT &Key) const {
+    const BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return TheBucket;
+    return nullptr;
+  }
+
+  /// lookup - Return the entry for the specified key, or a default
+  /// constructed value if no such entry exists.
+  ValueT lookup(const KeyT &Key) const {
+    const BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return TheBucket->getSecond();
+    return ValueT();
+  }
+
+  // Inserts key,value pair into the map if the key isn't already in the map.
+  // If the key is already in the map, it returns false and doesn't update the
+  // value.
+  detail::DenseMapPair<value_type *, bool> insert(const value_type &KV) {
+    return try_emplace(KV.first, KV.second);
+  }
+
+  // Inserts key,value pair into the map if the key isn't already in the map.
+  // If the key is already in the map, it returns false and doesn't update the
+  // value.
+  detail::DenseMapPair<value_type *, bool> insert(value_type &&KV) {
+    return try_emplace(__sanitizer::move(KV.first),
+                       __sanitizer::move(KV.second));
+  }
+
+  // Inserts key,value pair into the map if the key isn't already in the map.
+  // The value is constructed in-place if the key is not in the map, otherwise
+  // it is not moved.
+  template <typename... Ts>
+  detail::DenseMapPair<value_type *, bool> try_emplace(KeyT &&Key,
+                                                       Ts &&...Args) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return {TheBucket, false};  // Already in map.
+
+    // Otherwise, insert the new element.
+    TheBucket = InsertIntoBucket(TheBucket, __sanitizer::move(Key),
+                                 __sanitizer::forward<Ts>(Args)...);
+    return {TheBucket, true};
+  }
+
+  // Inserts key,value pair into the map if the key isn't already in the map.
+  // The value is constructed in-place if the key is not in the map, otherwise
+  // it is not moved.
+  template <typename... Ts>
+  detail::DenseMapPair<value_type *, bool> try_emplace(const KeyT &Key,
+                                                       Ts &&...Args) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return {TheBucket, false};  // Already in map.
+
+    // Otherwise, insert the new element.
+    TheBucket =
+        InsertIntoBucket(TheBucket, Key, __sanitizer::forward<Ts>(Args)...);
+    return {TheBucket, true};
+  }
+
+  /// Alternate version of insert() which allows a different, and possibly
+  /// less expensive, key type.
+  /// The DenseMapInfo is responsible for supplying methods
+  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+  /// type used.
+  template <typename LookupKeyT>
+  detail::DenseMapPair<value_type *, bool> insert_as(value_type &&KV,
+                                                     const LookupKeyT &Val) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Val, TheBucket))
+      return {TheBucket, false};  // Already in map.
+
+    // Otherwise, insert the new element.
+    TheBucket =
+        InsertIntoBucketWithLookup(TheBucket, __sanitizer::move(KV.first),
+                                   __sanitizer::move(KV.second), Val);
+    return {TheBucket, true};
+  }
+
+  bool erase(const KeyT &Val) {
+    BucketT *TheBucket;
+    if (!LookupBucketFor(Val, TheBucket))
+      return false;  // not in map.
+
+    TheBucket->getSecond().~ValueT();
+    TheBucket->getFirst() = getTombstoneKey();
+    decrementNumEntries();
+    incrementNumTombstones();
+    return true;
+  }
+
+  void erase(value_type *I) {
+    CHECK_NE(I, nullptr);
+    BucketT *TheBucket = &*I;
+    TheBucket->getSecond().~ValueT();
+    TheBucket->getFirst() = getTombstoneKey();
+    decrementNumEntries();
+    incrementNumTombstones();
+  }
+
+  value_type &FindAndConstruct(const KeyT &Key) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return *TheBucket;
+
+    return *InsertIntoBucket(TheBucket, Key);
+  }
+
+  ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; }
+
+  value_type &FindAndConstruct(KeyT &&Key) {
+    BucketT *TheBucket;
+    if (LookupBucketFor(Key, TheBucket))
+      return *TheBucket;
+
+    return *InsertIntoBucket(TheBucket, __sanitizer::move(Key));
+  }
+
+  ValueT &operator[](KeyT &&Key) {
+    return FindAndConstruct(__sanitizer::move(Key)).second;
+  }
+
+  /// Iterate over active entries of the container.
+  ///
+  /// Function can return fast to stop the process.
+  template <class Fn>
+  void forEach(Fn fn) {
+    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+    for (auto *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+      const KeyT K = P->getFirst();
+      if (!KeyInfoT::isEqual(K, EmptyKey) &&
+          !KeyInfoT::isEqual(K, TombstoneKey)) {
+        if (!fn(*P))
+          return;
+      }
+    }
+  }
+
+  template <class Fn>
+  void forEach(Fn fn) const {
+    const_cast<DenseMapBase *>(this)->forEach(
+        [&](const value_type &KV) { return fn(KV); });
+  }
+
+ protected:
+  DenseMapBase() = default;
+
+  void destroyAll() {
+    if (getNumBuckets() == 0)  // Nothing to do.
+      return;
+
+    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
+        P->getSecond().~ValueT();
+      P->getFirst().~KeyT();
+    }
+  }
+
+  void initEmpty() {
+    setNumEntries(0);
+    setNumTombstones(0);
+
+    CHECK_EQ((getNumBuckets() & (getNumBuckets() - 1)), 0);
+    const KeyT EmptyKey = getEmptyKey();
+    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
+      ::new (&B->getFirst()) KeyT(EmptyKey);
+  }
+
+  /// Returns the number of buckets to allocate to ensure that the DenseMap can
+  /// accommodate \p NumEntries without need to grow().
+  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
+    // Ensure that "NumEntries * 4 < NumBuckets * 3"
+    if (NumEntries == 0)
+      return 0;
+    // +1 is required because of the strict equality.
+    // For example if NumEntries is 48, we need to return 401.
+    return RoundUpToPowerOfTwo((NumEntries * 4 / 3 + 1) + /* NextPowerOf2 */ 1);
+  }
+
+  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
+    initEmpty();
+
+    // Insert all the old elements.
+    const KeyT EmptyKey = getEmptyKey();
+    const KeyT TombstoneKey = getTombstoneKey();
+    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
+      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
+          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
+        // Insert the key/value into the new table.
+        BucketT *DestBucket;
+        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
+        (void)FoundVal;  // silence warning.
+        CHECK(!FoundVal);
+        DestBucket->getFirst() = __sanitizer::move(B->getFirst());
+        ::new (&DestBucket->getSecond())
+            ValueT(__sanitizer::move(B->getSecond()));
+        incrementNumEntries();
+
+        // Free the value.
+        B->getSecond().~ValueT();
+      }
+      B->getFirst().~KeyT();
+    }
+  }
+
+  template <typename OtherBaseT>
+  void copyFrom(
+      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
+    CHECK_NE(&other, this);
+    CHECK_EQ(getNumBuckets(), other.getNumBuckets());
+
+    setNumEntries(other.getNumEntries());
+    setNumTombstones(other.getNumTombstones());
+
+    if (__sanitizer::is_trivially_copyable<KeyT>::value &&
+        __sanitizer::is_trivially_copyable<ValueT>::value)
+      internal_memcpy(reinterpret_cast<void *>(getBuckets()),
+                      other.getBuckets(), getNumBuckets() * sizeof(BucketT));
+    else
+      for (uptr i = 0; i < getNumBuckets(); ++i) {
+        ::new (&getBuckets()[i].getFirst())
+            KeyT(other.getBuckets()[i].getFirst());
+        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
+            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
+          ::new (&getBuckets()[i].getSecond())
+              ValueT(other.getBuckets()[i].getSecond());
+      }
+  }
+
+  static unsigned getHashValue(const KeyT &Val) {
+    return KeyInfoT::getHashValue(Val);
+  }
+
+  template <typename LookupKeyT>
+  static unsigned getHashValue(const LookupKeyT &Val) {
+    return KeyInfoT::getHashValue(Val);
+  }
+
+  static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); }
+
+  static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }
+
+ private:
+  unsigned getNumEntries() const {
+    return static_cast<const DerivedT *>(this)->getNumEntries();
+  }
+
+  void setNumEntries(unsigned Num) {
+    static_cast<DerivedT *>(this)->setNumEntries(Num);
+  }
+
+  void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }
+
+  void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }
+
+  unsigned getNumTombstones() const {
+    return static_cast<const DerivedT *>(this)->getNumTombstones();
+  }
+
+  void setNumTombstones(unsigned Num) {
+    static_cast<DerivedT *>(this)->setNumTombstones(Num);
+  }
+
+  void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }
+
+  void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }
+
+  const BucketT *getBuckets() const {
+    return static_cast<const DerivedT *>(this)->getBuckets();
+  }
+
+  BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }
+
+  unsigned getNumBuckets() const {
+    return static_cast<const DerivedT *>(this)->getNumBuckets();
+  }
+
+  BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }
+
+  const BucketT *getBucketsEnd() const {
+    return getBuckets() + getNumBuckets();
+  }
+
+  void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }
+
+  template <typename KeyArg, typename... ValueArgs>
+  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
+                            ValueArgs &&...Values) {
+    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
+
+    TheBucket->getFirst() = __sanitizer::forward<KeyArg>(Key);
+    ::new (&TheBucket->getSecond())
+        ValueT(__sanitizer::forward<ValueArgs>(Values)...);
+    return TheBucket;
+  }
+
+  template <typename LookupKeyT>
+  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
+                                      ValueT &&Value, LookupKeyT &Lookup) {
+    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
+
+    TheBucket->getFirst() = __sanitizer::move(Key);
+    ::new (&TheBucket->getSecond()) ValueT(__sanitizer::move(Value));
+    return TheBucket;
+  }
+
+  template <typename LookupKeyT>
+  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
+                                BucketT *TheBucket) {
+    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
+    // the buckets are empty (meaning that many are filled with tombstones),
+    // grow the table.
+    //
+    // The later case is tricky.  For example, if we had one empty bucket with
+    // tons of tombstones, failing lookups (e.g. for insertion) would have to
+    // probe almost the entire table until it found the empty bucket.  If the
+    // table completely filled with tombstones, no lookup would ever succeed,
+    // causing infinite loops in lookup.
+    unsigned NewNumEntries = getNumEntries() + 1;
+    unsigned NumBuckets = getNumBuckets();
+    if (UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
+      this->grow(NumBuckets * 2);
+      LookupBucketFor(Lookup, TheBucket);
+      NumBuckets = getNumBuckets();
+    } else if (UNLIKELY(NumBuckets - (NewNumEntries + getNumTombstones()) <=
+                        NumBuckets / 8)) {
+      this->grow(NumBuckets);
+      LookupBucketFor(Lookup, TheBucket);
+    }
+    CHECK(TheBucket);
+
+    // Only update the state after we've grown our bucket space appropriately
+    // so that when growing buckets we have self-consistent entry count.
+    incrementNumEntries();
+
+    // If we are writing over a tombstone, remember this.
+    const KeyT EmptyKey = getEmptyKey();
+    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
+      decrementNumTombstones();
+
+    return TheBucket;
+  }
+
+  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
+  /// FoundBucket.  If the bucket contains the key and a value, this returns
+  /// true, otherwise it returns a bucket with an empty marker or tombstone and
+  /// returns false.
+  template <typename LookupKeyT>
+  bool LookupBucketFor(const LookupKeyT &Val,
+                       const BucketT *&FoundBucket) const {
+    const BucketT *BucketsPtr = getBuckets();
+    const unsigned NumBuckets = getNumBuckets();
+
+    if (NumBuckets == 0) {
+      FoundBucket = nullptr;
+      return false;
+    }
+
+    // FoundTombstone - Keep track of whether we find a tombstone while probing.
+    const BucketT *FoundTombstone = nullptr;
+    const KeyT EmptyKey = getEmptyKey();
+    const KeyT TombstoneKey = getTombstoneKey();
+    CHECK(!KeyInfoT::isEqual(Val, EmptyKey));
+    CHECK(!KeyInfoT::isEqual(Val, TombstoneKey));
+
+    unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
+    unsigned ProbeAmt = 1;
+    while (true) {
+      const BucketT *ThisBucket = BucketsPtr + BucketNo;
+      // Found Val's bucket?  If so, return it.
+      if (LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
+        FoundBucket = ThisBucket;
+        return true;
+      }
+
+      // If we found an empty bucket, the key doesn't exist in the set.
+      // Insert it and return the default value.
+      if (LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
+        // If we've already seen a tombstone while probing, fill it in instead
+        // of the empty bucket we eventually probed to.
+        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
+        return false;
+      }
+
+      // If this is a tombstone, remember it.  If Val ends up not in the map, we
+      // prefer to return it than something that would require more probing.
+      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
+          !FoundTombstone)
+        FoundTombstone = ThisBucket;  // Remember the first tombstone found.
+
+      // Otherwise, it's a hash collision or a tombstone, continue quadratic
+      // probing.
+      BucketNo += ProbeAmt++;
+      BucketNo &= (NumBuckets - 1);
+    }
+  }
+
+  template <typename LookupKeyT>
+  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
+    const BucketT *ConstFoundBucket;
+    bool Result = const_cast<const DenseMapBase *>(this)->LookupBucketFor(
+        Val, ConstFoundBucket);
+    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
+    return Result;
+  }
+
+ public:
+  /// Return the approximate size (in bytes) of the actual map.
+  /// This is just the raw memory used by DenseMap.
+  /// If entries are pointers to objects, the size of the referenced objects
+  /// are not included.
+  uptr getMemorySize() const {
+    return RoundUpTo(getNumBuckets() * sizeof(BucketT), GetPageSizeCached());
+  }
+};
+
+/// Equality comparison for DenseMap.
+///
+/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
+/// is also in RHS, and that no additional pairs are in RHS.
+/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
+/// complexity is linear, worst case is O(N^2) (if every hash collides).
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+          typename BucketT>
+bool operator==(
+    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
+    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
+  if (LHS.size() != RHS.size())
+    return false;
+
+  bool R = true;
+  LHS.forEach(
+      [&](const typename DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT,
+                                      BucketT>::value_type &KV) -> bool {
+        const auto *I = RHS.find(KV.first);
+        if (!I || I->second != KV.second) {
+          R = false;
+          return false;
+        }
+        return true;
+      });
+
+  return R;
+}
+
+/// Inequality comparison for DenseMap.
+///
+/// Equivalent to !(LHS == RHS). See operator== for performance notes.
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+          typename BucketT>
+bool operator!=(
+    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
+    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
+  return !(LHS == RHS);
+}
+
+template <typename KeyT, typename ValueT,
+          typename KeyInfoT = DenseMapInfo<KeyT>,
+          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
+class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
+                                     KeyT, ValueT, KeyInfoT, BucketT> {
+  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+  // Lift some types from the dependent base class into this class for
+  // simplicity of referring to them.
+  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+  BucketT *Buckets = nullptr;
+  unsigned NumEntries = 0;
+  unsigned NumTombstones = 0;
+  unsigned NumBuckets = 0;
+
+ public:
+  /// Create a DenseMap with an optional \p InitialReserve that guarantee that
+  /// this number of elements can be inserted in the map without grow()
+  explicit DenseMap(unsigned InitialReserve) { init(InitialReserve); }
+  constexpr DenseMap() = default;
+
+  DenseMap(const DenseMap &other) : BaseT() {
+    init(0);
+    copyFrom(other);
+  }
+
+  DenseMap(DenseMap &&other) : BaseT() {
+    init(0);
+    swap(other);
+  }
+
+  ~DenseMap() {
+    this->destroyAll();
+    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
+  }
+
+  void swap(DenseMap &RHS) {
+    Swap(Buckets, RHS.Buckets);
+    Swap(NumEntries, RHS.NumEntries);
+    Swap(NumTombstones, RHS.NumTombstones);
+    Swap(NumBuckets, RHS.NumBuckets);
+  }
+
+  DenseMap &operator=(const DenseMap &other) {
+    if (&other != this)
+      copyFrom(other);
+    return *this;
+  }
+
+  DenseMap &operator=(DenseMap &&other) {
+    this->destroyAll();
+    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
+    init(0);
+    swap(other);
+    return *this;
+  }
+
+  void copyFrom(const DenseMap &other) {
+    this->destroyAll();
+    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
+    if (allocateBuckets(other.NumBuckets)) {
+      this->BaseT::copyFrom(other);
+    } else {
+      NumEntries = 0;
+      NumTombstones = 0;
+    }
+  }
+
+  void init(unsigned InitNumEntries) {
+    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
+    if (allocateBuckets(InitBuckets)) {
+      this->BaseT::initEmpty();
+    } else {
+      NumEntries = 0;
+      NumTombstones = 0;
+    }
+  }
+
+  void grow(unsigned AtLeast) {
+    unsigned OldNumBuckets = NumBuckets;
+    BucketT *OldBuckets = Buckets;
+
+    allocateBuckets(RoundUpToPowerOfTwo(Max<unsigned>(64, AtLeast)));
+    CHECK(Buckets);
+    if (!OldBuckets) {
+      this->BaseT::initEmpty();
+      return;
+    }
+
+    this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);
+
+    // Free the old table.
+    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets);
+  }
+
+ private:
+  unsigned getNumEntries() const { return NumEntries; }
+
+  void setNumEntries(unsigned Num) { NumEntries = Num; }
+
+  unsigned getNumTombstones() const { return NumTombstones; }
+
+  void setNumTombstones(unsigned Num) { NumTombstones = Num; }
+
+  BucketT *getBuckets() const { return Buckets; }
+
+  unsigned getNumBuckets() const { return NumBuckets; }
+
+  bool allocateBuckets(unsigned Num) {
+    NumBuckets = Num;
+    if (NumBuckets == 0) {
+      Buckets = nullptr;
+      return false;
+    }
+
+    uptr Size = sizeof(BucketT) * NumBuckets;
+    if (Size * 2 <= GetPageSizeCached()) {
+      // We always allocate at least a page, so use entire space.
+      unsigned Log2 = MostSignificantSetBitIndex(GetPageSizeCached() / Size);
+      Size <<= Log2;
+      NumBuckets <<= Log2;
+      CHECK_EQ(Size, sizeof(BucketT) * NumBuckets);
+      CHECK_GT(Size * 2, GetPageSizeCached());
+    }
+    Buckets = static_cast<BucketT *>(allocate_buffer(Size));
+    return true;
+  }
+
+  static void *allocate_buffer(uptr Size) {
+    return MmapOrDie(RoundUpTo(Size, GetPageSizeCached()), "DenseMap");
+  }
+
+  static void deallocate_buffer(void *Ptr, uptr Size) {
+    UnmapOrDie(Ptr, RoundUpTo(Size, GetPageSizeCached()));
+  }
+};
+
+}  // namespace Robustness
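Review aid, not part of the patch: the growth policy in InsertIntoBucketImpl doubles the table when the post-insert load factor would reach 3/4, and rehashes at the same size when tombstones leave at most 1/8 of the buckets truly empty. A standalone sketch of just that decision, with a hypothetical bucketsAfterInsert helper:

    // Mirrors the two growth conditions in DenseMapBase::InsertIntoBucketImpl.
    #include <cassert>

    // Returns the bucket count to use after inserting one more entry.
    // `rehash == true` with an unchanged count means "rehash in place to
    // flush tombstones" (the second UNLIKELY branch above).
    unsigned bucketsAfterInsert(unsigned numEntries, unsigned numTombstones,
                                unsigned numBuckets, bool &rehash) {
      unsigned newNumEntries = numEntries + 1;
      rehash = false;
      if (newNumEntries * 4 >= numBuckets * 3)          // load factor >= 3/4
        return numBuckets * 2;
      if (numBuckets - (newNumEntries + numTombstones) <= numBuckets / 8)
        rehash = true;                                  // too few empty buckets
      return numBuckets;
    }

    int main() {
      bool rehash = false;
      assert(bucketsAfterInsert(47, 0, 64, rehash) == 128);           // 48*4 >= 64*3
      assert(bucketsAfterInsert(10, 0, 64, rehash) == 64 && !rehash); // plenty of room
      assert(bucketsAfterInsert(10, 47, 64, rehash) == 64 && rehash); // tombstone-bound
      return 0;
    }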
diff --git a/compiler-rt/lib/tsan/rtl/rsan_instrument.hpp b/compiler-rt/lib/tsan/rtl/rsan_instrument.hpp
new file mode 100644
index 0000000000000..6e3bc6a5ac744
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_instrument.hpp
@@ -0,0 +1,358 @@
+#pragma once
+#include "rsan_robustnessmodel.hpp"
+#include "rsan_map.hpp"
+#include "rsan_defs.hpp"
+#include "rsan_report.hpp"
+#include "rsan_stacktrace.hpp"
+#include "rsan_arena.hpp"
+#include "rsan_lock.hpp"
+
+namespace Robustness{
+
+       static FakeMutex fakeMutex;
+       /*!
+        * Instrumentation
+        */
+       template <typename I>
+               //struct InstrumentationTemplate : Instrumentation{
+               struct InstrumentationTemplate {
+                       private:
+                               Vsc vsc; //!< VSC tracking
+                               I ins; //!< Memory Model tracking
+                               int64_t violationsCount = 0;
+
+                               Mutex locksLock; // Global Robustness Lock
+                               Arena<Mutex> locksAllocator;
+
+
+                               template <typename KeyT, typename ValueT>
+                                       using map = Robustness::Map<KeyT,ValueT>;
+
+                               map<Address, Mutex*> locks;
+
+
+                               //! Thread part
+                               struct ThreadStruct{
+                                       Vsc::Thread vsc;
+                                       typename I::Thread ins;
+                                       /*! Absorb another thread
+                                        * \param w Thread struct to absorb
+                                        */
+                                       void absorb(const ThreadStruct &w){
+                                               vsc.absorb(w.vsc);
+                                               ins.absorb(w.ins);
+                                       }
+                                       void resetKnowledge(const ThreadId &t){
+                                               vsc.resetKnowledge();
+                                               ins.resetKnowledge(t);
+                                       }
+                               };
+                               //! Location Part
+                               struct LocationStruct{
+                                       Vsc::Location vsc;
+                                       typename I::Location ins;
+                                       LittleStackTrace lastWrite;
+                                       LittleStackTrace lastWriteU;
+                                       LocationId lid;
+                               };
+                               //volatile LocationId locationCounter{1};
+                               u64 locationCounter{0};
+                               // Location 0 is reserved for SC fences
+
+                               Mutex structsLock; // Global Robustness Lock
+                               Arena<ThreadStruct> threadAllocator;
+                               Arena<LocationStruct> locationAllocator;
+
+                               map<ThreadId, ThreadStruct*> threads;
+                               map<Address, LocationStruct*> locations;
+
+                               /*!
+                                * Get Location Struct for address
+                                */
+                               inline auto& getLocationStruct(Address a) {
+                                       Lock lock(&structsLock);
+                                       if (auto it = locations.find(a); it != locations.end()){
+                                               return *it->second;
+                                       }
+                                       auto w = locationAllocator.allocate();
+                                       //w->lid = __atomic_fetch_add(&locationCounter, 1, __ATOMIC_SEQ_CST);
+                                       w->lid = ++locationCounter;
+                                       locations[a] = w;
+                                       return *w;
+                               }
+                               /*!
+                                * Get Thread Struct for thread id
+                                */
+                               inline auto& getThreadStruct(ThreadId tid) {
+                                       Lock lock(&structsLock);
+                                       if (auto it = threads.find(tid); it != threads.end()){
+                                               return *it->second;
+                                       }
+                                       auto w = threadAllocator.allocate();
+                                       threads[tid] = w;
+                                       return *w;
+                               }
+                               /*!
+                                * Get Location Struct for address, only if it exists
+                                */
+                               inline auto getMaybeLocationStruct(Address a) {
+                                       Lock lock(&structsLock);
+                                       auto t = locations.find(a);
+                                       return (t != locations.end() ? t->second : nullptr);
+                               }
+
+
+                               //! returns the number of violations
+                               virtual int64_t getViolationsCount() /*override*/{
+                                       return violationsCount;
+                               }
+
+                               /*!
+                                * Assert no read violation occurs
+                                *
+                                * \param t Thread Id
+                                * \param l Address
+                                * \param ts ThreadStruct
+                                * \param ls Location Struct
+                                */
+                               void assertReadViolation(ThreadId t, Address a, ThreadStruct &ts, LocationStruct &ls, DebugInfo dbg) {
+                                       auto l = ls.lid;
+                                       if (vsc.getLastTimeStamp(t, l, ts.vsc, ls.vsc) > ins.getLastTimeStamp(t, l, ts.ins, ls.ins)){
+                                               reportViolation<ViolationType::read>(t, a, l, dbg, ls.lastWrite);
+                                       }
+                                       }
+                               }
+                               /*!
+                                * Assert no write violation occurs
+                                *
+                                * \param t Thread Id
+                                * \param l Address
+                                * \param ts ThreadStruct
+                                * \param ls Location Struct
+                                */
+                               void assertWriteViolation(ThreadId t, Address a, ThreadStruct &ts, LocationStruct &ls, DebugInfo dbg) {
+                                       auto l = ls.lid;
+                                       if (vsc.getLastTimeStampU(t, l, ts.vsc, ls.vsc) > ins.getLastTimeStampU(t, l, ts.ins, ls.ins)){
+                                               reportViolation<ViolationType::write>(t, a, l, dbg, ls.lastWriteU);
+                                       }
+                               }
+
+                               void assertCasViolation(ThreadId t, Address a, ThreadStruct &ts, LocationStruct &ls, DebugInfo dbg, uint64_t val) {
+                                       // Weak CAS
+                                       assertReadViolation(t, a, ts, ls, dbg);
+                               }
+
+                               void assertStrongCasViolation(ThreadId t, Address a, ThreadStruct &ts, LocationStruct &ls, DebugInfo dbg, uint64_t val) {
+                                       //auto l = ls.lid;
+                                       //if (vsc.getLastTimeStampUV(t, l, ts.vsc, ls.vsc, val) >= ins.getLastTimeStampU(t, l, ts.ins, ls.ins)){
+                                       //      reportViolation<ViolationType::write>(t, a, l, dbg, ls.lastWriteU);
+                                       //} else if (vsc.getLastTimeStamp(t, l, ts.vsc, ls.vsc, val) >= ins.getLastTimeStamp(t, l, ts.ins, ls.ins)){
+                                       //      reportViolation<ViolationType::read>(t, a, l, dbg, ls.lastWrite);
+                                       //}
+                               }
+
+
+                       public:
+                               /*!
+                                * Verify load statement for violation without updating
+                                *
+                                * \param t tid
+                                * \param l address
+                                */
+                               void verifyLoadStatement(ThreadId t, Address addr, morder, DebugInfo dbg) /*override*/{
+                                       auto &ls = getLocationStruct(addr);
+                                       auto &ts = getThreadStruct(t);
+                                       assertReadViolation(t, addr, ts, ls, dbg);
+                               }
+                               /*!
+                                * Verify store statement for violation without updating
+                                *
+                                * \param t tid
+                                * \param l address
+                                */
+                               void verifyStoreStatement(ThreadId t, Address addr, morder, DebugInfo dbg) /*override*/{
+                                       auto &ls = getLocationStruct(addr);
+                                       auto &ts = getThreadStruct(t);
+                                       assertWriteViolation(t, addr, ts, ls, dbg);
+                               }
+
+                               /*!
+                                * Verify robustness and update load statement
+                                *
+                                * \param t tid
+                                * \param l address
+                                * \param mo memory order
+                                */
+                               void updateLoadStatement(Action::AtomicLoadAction a) /*override*/{
+                                       ThreadId t = a.tid;
+                                       Address addr = a.addr;
+                                       morder mo = a.mo;
+                                       auto &ls = getLocationStruct(addr);
+                                       auto &ts = getThreadStruct(t);
+                                       LocationId l = ls.lid;
+                                       assertReadViolation(t, addr, ts, ls, a.dbg);
+
+                                       vsc.updateLoadStatement(t, l, ts.vsc, ls.vsc, mo);
+                                       ins.updateLoadStatement(t, l, ts.ins, ls.ins, mo);
+                               }
+
+                               /*!
+                                * Verify robustness and update store statement
+                                *
+                                * \param t tid
+                                * \param l address
+                                * \param mo memory order
+                                */
+                               void updateStoreStatement(Action::AtomicStoreAction a) /*override*/{
+                                       ThreadId t = a.tid;
+                                       Address addr = a.addr;
+                                       morder mo = a.mo;
+                                       uint64_t val = a.oldValue;
+                                       auto &ls = getLocationStruct(addr);
+                                       auto &ts = getThreadStruct(t);
+                                       LocationId l = ls.lid;
+                                       assertWriteViolation(t, addr, ts, ls, a.dbg);
+
+
+                                       vsc.updateStoreStatement(t, l, ts.vsc, ls.vsc, mo, val);
+                                       ins.updateStoreStatement(t, l, ts.ins, ls.ins, mo, val);
+
+
+                                       ObtainCurrentLine(a.dbg.thr, a.dbg.pc, &ls.lastWrite);
+                                       ObtainCurrentLine(a.dbg.thr, a.dbg.pc, &ls.lastWriteU);
+                               }
+
+                               /*!
+                                * Verify robustness and update RMW statement
+                                *
+                                * \param t tid
+                                * \param l address
+                                * \param mo memory order
+                                */
+                               void updateRmwStatement(Action::AtomicRMWAction a) /*override*/{
+                                       ThreadId t = a.tid;
+                                       Address addr = a.addr;
+                                       morder mo = a.mo;
+                                       uint64_t val = a.oldValue;
+                                       auto &ls = getLocationStruct(addr);
+                                       auto &ts = getThreadStruct(t);
+                                       LocationId l = ls.lid;
+                                       assertWriteViolation(t, addr, ts, ls, a.dbg);
+
+                                       vsc.updateRmwStatement(t, l, ts.vsc, ls.vsc, mo, val);
+                                       ins.updateRmwStatement(t, l, ts.ins, ls.ins, mo, val);
+
+                                       ObtainCurrentLine(a.dbg.thr, a.dbg.pc, &ls.lastWrite);
+                               }
+
+                               void updateCasStatement(Action::AtomicCasAction a) /*override*/{
+                                       ThreadId t = a.tid;
+                                       Address addr = a.addr;
+                                       morder mo = a.mo;
+                                       uint64_t expected = a.oldValue;
+                                       bool success = a.success;
+                                       auto &ls = getLocationStruct(addr);
+                                       auto &ts = getThreadStruct(t);
+                                       LocationId l = ls.lid;
+                                       assertCasViolation(t, addr, ts, ls, a.dbg, expected);
+
+                                       if (success){
+                                               vsc.updateRmwStatement(t, l, ts.vsc, ls.vsc, mo, expected);
+                                               ins.updateRmwStatement(t, l, ts.ins, ls.ins, mo, expected);
+                                               ObtainCurrentLine(a.dbg.thr, a.dbg.pc, &ls.lastWrite);
+                                       } else {
+                                               vsc.updateLoadStatement(t, l, ts.vsc, ls.vsc, mo);
+                                               ins.updateLoadStatement(t, l, ts.ins, ls.ins, mo);
+                                       }
+
+                               }
+
+                               /*!
+                                * Update fence statement
+                                *
+                                * \param t tid
+                                * \param mo memory order
+                                */
+                               void updateFenceStatement(ThreadId t, morder mo) /*override*/{
+                                       // HACK: This might break on architectures that use the address 0
+                                       auto &ls = getLocationStruct(0);
+                                       auto &ts = getThreadStruct(t);
+                                       ins.updateFenceStatement(t, ts.ins, ls.ins, mo);
+                               }
+
+                               /*!
+                                * Absorb knowledge from thread (Join)
+                                *
+                                * \param _absorber The thread to gain the knowledge
+                                * \param _absorbee The thread giving the knowledge
+                                */
+                               void absorbThread(ThreadId _absorber, ThreadId _absorbee) /*override*/{
+                                       auto &absorber = getThreadStruct(_absorber);
+                                       auto &absorbee = getThreadStruct(_absorbee);
+                                       absorber.absorb(absorbee);
+                               }
+
+                               //! Initialize thread data structure
+                               void initThread(ThreadId tid) {
+                                       ins.initThread(tid, getThreadStruct(tid).ins);
+                               }
+
+                               /*!
+                                * Clone knowledge of current thread to a new thread
+                                *
+                                * \param _src The thread creating the new thread
+                                * \param _dst The newly created thread
+                                */
+                               void cloneThread(ThreadId _src, ThreadId _dst) /*override*/{
+                                       auto &dst = getThreadStruct(_dst);
+                                       auto &src = getThreadStruct(_src);
+                                       dst.resetKnowledge(_dst);
+                                       dst.absorb(src);
+                                       initThread(_dst);
+                               }
+
+
+                               /*!
+                                * Free a chunk of memory, removing knowledge of it from all relations and
+                                * verifying the deletion itself doesn't violate anything
+                                *
+                                * \param t tid
+                                * \param l Address
+                                * \param size size from address
+                                */
+                               void freeMemory(Action::Free w) /*override*/{
+                                       auto &t = w.tid;
+                                       auto &addr = w.addr;
+                                       auto &size = w.size;
+
+                                       auto &ts = getThreadStruct(t);
+
+
+                                       // We don't free the memory. We just mark the location as known to all.
+                                       for (auto a = addr; a < addr+size; ++a){
+                                               if (a == 0) continue;
+                                               auto ls = getMaybeLocationStruct(a);
+                                               if (ls){
+                                                       Lock instLock(getLockForAddr(a));
+                                                       assertWriteViolation(t, a, ts, *ls, w.dbg);
+                                                       vsc.freeLocation(ls->lid, ls->vsc);
+                                                       ins.freeLocation(ls->lid, ls->ins);
+                                               }
+                                       }
+                               }
+
+                               Mutex* getLockForAddr(Address addr){
+                                       if (!Robustness::isRobustness())
+                                               return &fakeMutex;
+                                       Lock lock(&locksLock);
+                                       if (auto it = locks.find(addr); it != locks.end()){
+                                               return it->second;
+                                       }
+                                       auto newLock = locksAllocator.allocate();
+                                       locks[addr] = newLock;
+                                       return newLock;
+                               }
+
+               };
+
+       inline Robustness::InstrumentationTemplate<Robustness::VrlxNoFence> ins;
+} // namespace Robustness
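Review aid, not part of the patch: getLockForAddr lazily creates one mutex per tracked address under the global locksLock, with the Arena keeping the mutexes at stable addresses. A rough standalone analogue, with std::map, std::unique_ptr and std::mutex standing in for the sanitizer types and a hypothetical PerAddressLocks name:

    // Standalone analogue of InstrumentationTemplate::getLockForAddr.
    #include <cstdint>
    #include <map>
    #include <memory>
    #include <mutex>

    class PerAddressLocks {
      std::mutex tableLock;                                  // ~ locksLock
      std::map<uint64_t, std::unique_ptr<std::mutex>> locks; // ~ map<Address, Mutex*>

    public:
      std::mutex *lockFor(uint64_t addr) {
        std::lock_guard<std::mutex> guard(tableLock);        // protect the table itself
        auto it = locks.find(addr);
        if (it != locks.end())
          return it->second.get();
        auto inserted = locks.emplace(addr, std::make_unique<std::mutex>());
        return inserted.first->second.get();                 // created on first use
      }
    };

    int main() {
      PerAddressLocks locks;
      std::lock_guard<std::mutex> g(*locks.lockFor(0x1000)); // per-address critical section
      return locks.lockFor(0x1000) == locks.lockFor(0x1000) ? 0 : 1;
    }

The Arena-backed version in the patch additionally short-circuits to a shared FakeMutex when the robustness flag is off, so uninstrumented runs pay no locking cost.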
diff --git a/compiler-rt/lib/tsan/rtl/rsan_lock.hpp b/compiler-rt/lib/tsan/rtl/rsan_lock.hpp
new file mode 100644
index 0000000000000..864882ca016ed
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_lock.hpp
@@ -0,0 +1,33 @@
+#pragma once
+#include "rsan_defs.hpp"
+
+//class __tsan::ThreadState;
+
+namespace Robustness{
+class SANITIZER_MUTEX FakeMutex : public Mutex {
+ public:
+  explicit constexpr FakeMutex(__tsan::MutexType type = __tsan::MutexUnchecked)
+      : Mutex(type) {}
+
+  void Lock() SANITIZER_ACQUIRE() { }
+
+  bool TryLock() SANITIZER_TRY_ACQUIRE(true) { return true; }
+
+  void Unlock() SANITIZER_RELEASE() { }
+
+  void ReadLock() SANITIZER_ACQUIRE_SHARED() { }
+
+  void ReadUnlock() SANITIZER_RELEASE_SHARED() { }
+
+  void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() { }
+
+  void CheckLocked() const SANITIZER_CHECK_LOCKED() {}
+
+  void CheckReadLocked() const SANITIZER_CHECK_LOCKED() { }
+
+
+  //FakeMutex(LinkerInitialized) = delete;
+  FakeMutex(const FakeMutex &) = delete;
+  void operator=(const FakeMutex &) = delete;
+};
+} // namespace Robustness
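FakeMutex above is a null-object mutex: every operation is a no-op, so call sites can take a Lock unconditionally and pay essentially nothing when robustness checking is off. A standalone sketch of the same pattern under that assumption, using a hypothetical NullMutex so the snippet compiles without sanitizer headers:

  #include <mutex>

  struct NullMutex {                   // satisfies BasicLockable; all operations are no-ops
    void lock() {}
    void unlock() {}
    bool try_lock() { return true; }
  };

  template <typename MutexT>
  void instrumentedStep(MutexT &m) {
    std::lock_guard<MutexT> guard(m);  // same call site for real and fake mutexes
    // ... per-address bookkeeping would go here ...
  }

  int main() {
    std::mutex real;
    NullMutex fake;
    instrumentedStep(real);            // checking enabled: real exclusion
    instrumentedStep(fake);            // checking disabled: zero-cost no-op
  }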
diff --git a/compiler-rt/lib/tsan/rtl/rsan_map.hpp b/compiler-rt/lib/tsan/rtl/rsan_map.hpp
new file mode 100644
index 0000000000000..2bbb098c11ed1
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_map.hpp
@@ -0,0 +1,88 @@
+#pragma once
+#include "rsan_vector.h"
+
+namespace Robustness {
+template<
+    class Key,
+    class T
+> class Map {
+
+
+       Vector<Pair<Key, T>> v;
+
+       u64 findLocationLinear(Key k, u64 start, u64 end){
+               for (u64 i=start; i<end;++i)
+                       if (v[i].first >= k) return i;
+               return end;
+       }
+
+       u64 find_(Key k, u64 first = 0){
+               const auto len = v.size();
+               size_t count = len - first;
+               while (count > 0) {
+                       if (count <= 8) return findLocationLinear(k, first, count+first);
+                       u64 step = count / 2, it = first + step;
+                       u64 tkey = v[it].first;
+                       if (tkey > k){
+                               count = step;
+                       } else if (tkey < k){
+                               first = it + 1;
+                               count -= step + 1;
+                       } else {
+                               return it;
+                       }
+               }
+               return first;
+       }
+
+       public:
+
+
+       template< class... Args >
+       Pair<Pair<Key, T>*, bool> try_emplace( const Key& k, Args&&... args ){
+               auto i = find_(k);
+               if (i < v.size() && v[i].first == k){
+                       return pair(&v[i], false);
+               } else {
+                       v.insert(i, pair(k, T(args...)));
+                       return pair(&v[i], true);
+               }
+       }
+
+       decltype(v.end()) find(const Key &k){
+               auto i = find_(k);
+               if (i < v.size() && v[i].first == k)
+                       return &v[i];
+               else
+                       return v.end();
+       }
+
+
+       decltype(v.begin()) begin(){
+               return v.begin();
+       }
+       decltype(v.begin()) end(){
+               return v.end();
+       }
+
+       bool contains(const Key &k){
+               return find(k) != v.end();
+       }
+
+       void clear(){
+               v.clear();
+       }
+
+       T& operator[]( Key&& key ){
+               return this->try_emplace(key).first->second;
+       }
+       T& operator[]( const Key& key ){
+               return this->try_emplace(key).first->second;
+       }
+
+       auto size(){
+               return v.size();
+       }
+
+};
+} // namespace Robustness
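Map above is a flat map: a Vector of key/value pairs kept sorted by key, searched by binary search with a linear scan once the remaining range is at most 8 entries; try_emplace()/operator[] insert at the sorted position. A behaviourally similar flat map restated with the standard library purely as a compilable illustration (the patch's version deliberately avoids libc++):

  #include <algorithm>
  #include <cstdint>
  #include <utility>
  #include <vector>

  template <class Key, class T>
  class FlatMap {
    std::vector<std::pair<Key, T>> v;          // kept sorted by key
    auto lower(const Key &k) {
      return std::lower_bound(v.begin(), v.end(), k,
          [](const std::pair<Key, T> &e, const Key &key) { return e.first < key; });
    }
   public:
    T &operator[](const Key &k) {
      auto it = lower(k);
      if (it != v.end() && it->first == k)
        return it->second;
      return v.insert(it, {k, T{}})->second;   // insertion keeps the vector sorted
    }
    bool contains(const Key &k) {
      auto it = lower(k);
      return it != v.end() && it->first == k;
    }
  };

  int main() {
    FlatMap<uint64_t, int> m;
    m[0x2000] = 2;
    m[0x1000] = 1;                             // inserted before 0x2000
    return m.contains(0x1000) ? 0 : 1;
  }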
diff --git a/compiler-rt/lib/tsan/rtl/rsan_memoryorder.hpp b/compiler-rt/lib/tsan/rtl/rsan_memoryorder.hpp
new file mode 100644
index 0000000000000..5da25b7330145
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_memoryorder.hpp
@@ -0,0 +1,65 @@
+#pragma once
+#include "tsan_defs.h"
+#include "tsan_interface.h"
+namespace Robustness{
+       using __tsan::morder;
+       using __tsan::mo_relaxed;
+       using __tsan::mo_consume;
+       using __tsan::mo_acquire;
+       using __tsan::mo_release;
+       using __tsan::mo_acq_rel;
+       using __tsan::mo_seq_cst;
+       //! Check if lhs is at least as strong as rhs.
+       /*!
+        * Check if one memory order is at least as strong as another
+        * \param lhs memory order
+        * \param rhs memory order
+        * \return true if lhs is at least as strong as rhs
+        */
+       inline bool atLeast(__tsan::morder lhs, __tsan::morder rhs){
+               using namespace std;
+               switch (rhs) {
+                       case __tsan::mo_relaxed:
+                               return true;
+                       case __tsan::mo_consume:
+                       case __tsan::mo_acquire:
+                               switch (lhs) {
+                                       case __tsan::mo_relaxed:
+                                       case __tsan::mo_release:
+                                               return false;
+                                       case __tsan::mo_acq_rel:
+                                       case __tsan::mo_acquire:
+                                       case __tsan::mo_seq_cst:
+                                               return true;
+                                       case __tsan::mo_consume:
+                                               //assertm("Consume not supported", 0);
+                                       default:
+                                               //assertm("Unknown memory order value", 0);
+                                               // TODO: Remove bugs from here
+                                               return false;
+                               }
+                       case __tsan::mo_release:
+                               switch (lhs) {
+                                       case __tsan::mo_relaxed:
+                                       case __tsan::mo_acquire:
+                                       case __tsan::mo_consume:
+                                               return false;
+                                       case __tsan::mo_acq_rel:
+                                       case __tsan::mo_release:
+                                       case __tsan::mo_seq_cst:
+                                               return true;
+                                       default:
+                                               // TODO: Remove bugs from here
+                                               //assertm("Unknown memory order value", 0);
+                                               return false;
+                               }
+                       case __tsan::mo_acq_rel:
+                               return lhs == __tsan::mo_seq_cst || lhs == __tsan::mo_acq_rel;
+                       case __tsan::mo_seq_cst:
+                               return lhs == __tsan::mo_seq_cst;
+               }
+               //assertm(0, "Unhandled atLeast for some memory order");
+               __builtin_unreachable();
+       }
+} // namespace Robustness
+
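atLeast(lhs, rhs) implements the usual partial "strength" order on memory orders: seq_cst dominates everything, acq_rel covers both acquire and release, acquire and release are incomparable, and relaxed is dominated by all. A standalone restatement against std::memory_order with a few expected results; this is a simplified stand-in (consume is treated like acquire here), not the patch's function:

  #include <atomic>
  #include <cassert>

  static bool providesAcquire(std::memory_order m) {
    return m == std::memory_order_consume || m == std::memory_order_acquire ||
           m == std::memory_order_acq_rel || m == std::memory_order_seq_cst;
  }
  static bool providesRelease(std::memory_order m) {
    return m == std::memory_order_release || m == std::memory_order_acq_rel ||
           m == std::memory_order_seq_cst;
  }

  // True if lhs is at least as strong as rhs.
  static bool at_least(std::memory_order lhs, std::memory_order rhs) {
    if (rhs == std::memory_order_seq_cst) return lhs == std::memory_order_seq_cst;
    if (providesAcquire(rhs) && !providesAcquire(lhs)) return false;
    if (providesRelease(rhs) && !providesRelease(lhs)) return false;
    return true;
  }

  int main() {
    assert(at_least(std::memory_order_seq_cst, std::memory_order_acq_rel));
    assert(at_least(std::memory_order_acq_rel, std::memory_order_acquire));
    assert(!at_least(std::memory_order_release, std::memory_order_acquire)); // incomparable
    assert(!at_least(std::memory_order_acquire, std::memory_order_release)); // incomparable
    assert(at_least(std::memory_order_relaxed, std::memory_order_relaxed));
  }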
diff --git a/compiler-rt/lib/tsan/rtl/rsan_report.cpp b/compiler-rt/lib/tsan/rtl/rsan_report.cpp
new file mode 100644
index 0000000000000..064713e7472ef
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_report.cpp
@@ -0,0 +1,72 @@
+#include "rsan_report.hpp"
+#include "rsan_defs.hpp"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
+namespace __tsan {
+
+void GetLineOfCode(InternalScopedString& res, const ReportStack *ent) {
+  if (ent == 0 || ent->frames == 0) {
+    res.Append("[failed to locate source]");
+    return;
+  }
+  SymbolizedStack *frame = ent->frames;
+  for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
+       const char *formatString = "%S";
+       // FIXME: Need to extract this...
+       StackTracePrinter::GetOrInit()->RenderFrame(
+                       &res, formatString, i, frame->info.address,
+                       &frame->info, common_flags()->symbolize_vs_style,
+                       common_flags()->strip_path_prefix);
+  }
+}
+
+ReportStack SymbolizeStack(StackTrace trace) {
+  if (trace.size == 0)
+    return ReportStack();
+  SymbolizedStack *top = nullptr;
+  for (uptr si = 0; si < trace.size; si++) {
+    const uptr pc = trace.trace[si];
+    uptr pc1 = pc;
+    // We obtain the return address, but we're interested in the previous
+    // instruction.
+    if ((pc & kExternalPCBit) == 0)
+      pc1 = StackTrace::GetPreviousInstructionPc(pc);
+    SymbolizedStack *ent = SymbolizeCode(pc1);
+    CHECK_NE(ent, 0);
+    SymbolizedStack *last = ent;
+    while (last->next) {
+      last->info.address = pc;  // restore original pc for report
+      last = last->next;
+    }
+    last->info.address = pc;  // restore original pc for report
+    last->next = top;
+    top = ent;
+  }
+  //StackStripMain(top);
+
+  ReportStack stack;
+  stack.frames = top;
+  return stack;
+}
+
+void getCurrentLine(InternalScopedString &ss, ThreadState *thr, uptr pc) {
+       //CheckedMutex::CheckNoLocks();
+       ScopedIgnoreInterceptors ignore;
+       //
+  // We need to lock the slot during RestoreStack because it protects
+  // the slot journal.
+  //Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
+  //ThreadRegistryLock l0(&ctx->thread_registry);
+  //Lock slots_lock(&ctx->slot_mtx);
+
+  VarSizeStackTrace trace;
+  ObtainCurrentLine(thr, pc, &trace);
+  auto stack = SymbolizeStack(trace);
+  GetLineOfCode(ss, &stack);
+}
+
+
+} //namespace __tsan
+
+namespace Robustness {
+}// namespace Robustness
+
diff --git a/compiler-rt/lib/tsan/rtl/rsan_report.hpp b/compiler-rt/lib/tsan/rtl/rsan_report.hpp
new file mode 100644
index 0000000000000..7163fc42f981d
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_report.hpp
@@ -0,0 +1,85 @@
+#pragma once
+#include "tsan_defs.h"
+#include "tsan_rtl.h"
+#include "tsan_symbolize.h"
+#include "tsan_flags.h"
+#include "rsan_defs.hpp"
+#include "tsan_stack_trace.h"
+#include "rsan_stacktrace.hpp"
+namespace __tsan {
+       class ThreadState;
+
+void GetLineOfCode(InternalScopedString& res, const ReportStack *ent);
+ReportStack SymbolizeStack(StackTrace trace);
+
+template<typename StackTraceTy>
+void ObtainCurrentLine(ThreadState *thr, uptr toppc, StackTraceTy *stack,
+                        uptr *tag = nullptr) {
+  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
+  uptr start = 0;
+  const auto kStackTraceMax = Robustness::kStackTraceMax;
+  if (size + !!toppc > kStackTraceMax) {
+    start = size + !!toppc - kStackTraceMax;
+    size = kStackTraceMax - !!toppc;
+  }
+  stack->Init(&thr->shadow_stack[start], size, toppc);
+  ExtractTagFromStack(stack, tag);
+}
+void getCurrentLine(InternalScopedString &s, ThreadState *thr, uptr pc);
+
+} //namespace __tsan
+
+namespace Robustness {
+       using __tsan::ObtainCurrentLine;
+       using __tsan::getCurrentLine;
+
+
+
+template<Robustness::ViolationType V> void reportViolation(Robustness::ThreadId t, Robustness::Address a, Robustness::LocationId lid, const DebugInfo &dbg, const LittleStackTrace &prev = {}){
+       InternalScopedString ss;
+       getCurrentLine(ss, dbg.thr, dbg.pc);
+       InternalScopedString prevs;
+
+       auto oldstack = __tsan::SymbolizeStack(prev);
+       __tsan::GetLineOfCode(prevs, &oldstack);
+
+       // TODO: Make the color codes easier to use
+       // TODO: Update print functions
+       if constexpr (V == Robustness::ViolationType::read ||
+                       V == Robustness::ViolationType::write){
+               //++violationsCount;
+               const char *fmtString;
+#define PRESTRING "\033[1;31mRobustness Violation: Tid: %u, Address: %llx (%d), Type: "
+#define POSTSTRING " %d, Violation: %s, PrevAccess: %s\033[0m\n"
+               if constexpr (V == Robustness::ViolationType::read)
+                       fmtString = PRESTRING "rd" POSTSTRING;
+               else
+                       fmtString = PRESTRING "st" POSTSTRING;
+#undef PRESTRING
+#undef POSTSTRING
+               Printf(fmtString, t, a, lid, (int)V, ss.data(), prevs.data());
+       } else
+               static_assert(Robustness::always_false_v<decltype(V)>, "Unknown error type");
+}
+
+template<Robustness::ViolationType V> void reportViolation(Robustness::ThreadId t, Robustness::Address a, const DebugInfo& dbg, const LittleStackTrace &prev, uint64_t val){
+       InternalScopedString ss;
+       getCurrentLine(ss, dbg.thr, dbg.pc);
+       InternalScopedString prevs;
+
+       auto oldstack = __tsan::SymbolizeStack(prev);
+       __tsan::GetLineOfCode(prevs, &oldstack);
+
+       if constexpr (V == Robustness::ViolationType::read ||
+                       V == Robustness::ViolationType::write){
+               const char *fmtString;
+               if constexpr (V == Robustness::ViolationType::read)
+                       fmtString = "\033[1;31mRobustness Violation: Tid: %u, Address: %llx, Type: rd %d, Val: %llu Violation: %s, PrevAccess: %s\033[0m\n";
+               else
+                       fmtString = "\033[1;31mRobustness Violation: Tid: %u, Address: %llx, Type: st %d, Val: %llu Violation: %s, PrevAccess: %s\033[0m\n";
+               Printf(fmtString, t, a, (int)V, val, ss.data(), prevs.data());
+       } else
+               static_assert(Robustness::always_false_v<decltype(V)>, "Unknown error type");
+}
+} //namespace Robustness
+
diff --git a/compiler-rt/lib/tsan/rtl/rsan_robustnessmodel.hpp b/compiler-rt/lib/tsan/rtl/rsan_robustnessmodel.hpp
new file mode 100644
index 0000000000000..4aa0eaa8c9120
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_robustnessmodel.hpp
@@ -0,0 +1,286 @@
+#pragma once
+#include "rsan_memoryorder.hpp"
+#include "rsan_vectorclock.hpp"
+#include "rsan_defs.hpp"
+#include "rsan_vector.h"
+#include "rsan_map.hpp"
+#include "rsan_action.hpp"
+
+
+namespace Robustness {
+       template<class KeyT, class ValueT>
+       using map = Robustness::Map<KeyT,ValueT>;
+
+       //! Track SC with VectorClocks
+       struct Vsc{
+               //! Thread component
+               struct Thread{
+                       VectorClock v, vu;
+                       //! Absorb other thread into self
+                       void absorb(const Thread &t){
+                               v |= t.v;
+                               vu |= t.vu;
+                       }
+                       void resetKnowledge(){
+                               v.reset();
+                               vu.reset();
+                       }
+               };
+               //! Location component
+               struct Location{
+                       timestamp_t stamp = 0;
+                       timestamp_t stampu = 0;
+                       VectorClock m, w;
+                       VectorClock mu, wu;
+               };
+
+               /*!
+                * Update load statement
+                *
+                * Memory order is ignored for SC
+                */
+               void updateLoadStatement(ThreadId , LocationId , Thread &ts, Location &ls, morder){
+                       ls.m |= ts.v;
+                       ts.v |= ls.w;
+
+                       ls.mu |= ts.vu;
+                       ts.vu |= ls.wu;
+               }
+
+               /*!
+                * Update store statement
+                *
+                * Memory order is ignored for SC
+                */
+               void updateStoreStatement(ThreadId , LocationId a, Thread &ts, Location &ls, morder, u64 val){
+                       ls.m |= timestamp(a, ++ls.stamp);
+                       ts.v |= ls.m;
+                       ls.w = ts.v;
+                       ls.m = ts.v;
+
+                       ls.mu |= timestamp(a, ++ls.stampu);
+                       ts.vu |= ls.mu;
+                       ls.wu = ts.vu;
+                       ls.mu = ts.vu;
+               }
+
+               /*!
+                * Update RMW statement
+                *
+                * Memory order is ignored for SC
+                */
+               void updateRmwStatement(ThreadId t, LocationId a, Thread &ts, Location &ls, morder mo, u64 val){
+                       //return updateStoreStatement(t, a, ts, ls, mo);
+                       ls.m |= timestamp(a, ++ls.stamp);
+                       ts.v |= ls.m;
+                       ls.w = ts.v;
+                       ls.m = ts.v;
+
+                       ts.vu |= ls.mu;
+                       ls.wu = ts.vu;
+                       ls.mu = ts.vu;
+               }
+
+               //! Check if Thread knows of last write to \arg l
+               bool knowsLastWrite(ThreadId , LocationId l, Thread &ts, Location &ls) const{
+                       return ls.w[l] <= ts.v[l];
+               }
+               timestamp_t getLastTimeStamp(ThreadId , LocationId l, Thread &ts, Location &ls) const{
+                       return ts.v[l].ts;
+               }
+               timestamp_t getLastTimeStampU(ThreadId , LocationId l, Thread &ts, Location &ls) const{
+                       return ts.vu[l].ts;
+               }
+               timestamp_t getLastTimeStampV(ThreadId , LocationId l, Thread &ts, Location &ls, u64 val) const{
+                       return ts.v[l].ts - 1;
+               }
+               timestamp_t getLastTimeStampUV(ThreadId , LocationId l, Thread &ts, Location &ls, u64 val) const{
+                       return ts.vu[l].ts - 1;
+               }
+
+               //! Remove locations when freeing memory
+               void freeLocation(LocationId l, Location &ls){
+                       ls.w.reset();
+                       ls.m.reset();
+               }
+       };
+
+
+       //! Track the trace with RC20 semantics
+       struct VrlxNoFence{
+               //! Thread component
+               struct Thread{
+                       VectorClock vc;
+                       VectorClock vr;
+                       VectorClock va;
+                       VectorClock vcu;
+                       VectorClock vru;
+                       VectorClock vau;
+
+                       //! Absorb thread view into self
+                       void absorb(const Thread &t){
+                               vc |= t.vc;
+                               vr |= t.vr;
+                               va |= t.va;
+                               vcu |= t.vcu;
+                               vru |= t.vru;
+                               vau |= t.vau;
+                       }
+
+                       void resetKnowledge(ThreadId t){
+                               vc.reset();
+                               vr.reset();
+                               va.reset();
+                               vcu.reset();
+                               vru.reset();
+                               vau.reset();
+                       }
+               };
+               //! Location component
+               struct Location{
+                       timestamp_t writeStamp = 0, writeStampU = 0;
+                       VectorClock w;
+                       VectorClock wu;
+               };
+               //! Initialize thread
+               void initThread(ThreadId tid, Thread &ts){
+               }
+
+
+               //! Update load statement
+               void updateLoadStatement(ThreadId , LocationId a, Thread &ts, Location &ls, morder mo){
+                       ts.va |= ls.w;
+                       ts.vau |= ls.wu;
+                       if (atLeast(mo, (mo_acquire))){
+                               ts.vc |= ls.w;
+                               ts.vcu |= ls.wu;
+                       } else {
+                               ts.vc |= ls.w[a];
+                               ts.vcu |= ls.wu[a];
+                       }
+               }
+
+               //! Update store statement
+               void updateStoreStatement(ThreadId t, LocationId a, Thread &ts, Location &ls, morder mo, uint64_t oldValue){
+                       const auto timestampV =  timestamp(a, ++ls.writeStamp);
+                       const auto timestampVU = timestamp(a, ++ls.writeStampU);
+                       ls.w  |= timestampV;
+                       ls.wu |= timestampVU;
+                       ts.va |= timestampV;
+                       ts.vc |= timestampV;
+                       ts.vau |= timestampVU;
+                       ts.vcu |= timestampVU;
+
+
+                       if (atLeast(mo, (mo_release))){
+                               ls.w = ts.vc;
+                               ls.wu = ts.vcu;
+                       } else {
+                               ls.w = ts.vr;
+                               ls.w |= timestampV;
+                               ls.wu = ts.vru;
+                               ls.wu |= timestampVU;
+                       }
+               }
+
+               //! Update RMW statement
+               void updateRmwStatement(ThreadId t, LocationId a, Thread &ts, Location &ls, morder mo, uint64_t oldValue){
+                       const auto timestampV =  timestamp(a, ++ls.writeStamp);
+                       ls.w  |= timestampV;
+                       ts.va |= timestampV;
+                       ts.vc |= timestampV;
+
+
+                       ts.va |= ls.w;
+                       ts.vau |= ls.wu;
+                       if (atLeast(mo, (mo_acquire))){
+                               ts.vc |= ls.w;
+                               ts.vcu |= ls.wu;
+                       } else {
+                               ts.vcu |= ls.wu[a];
+                       }
+
+                       if (atLeast(mo, (mo_release))){
+                               ls.w |= ts.vc;
+                               ls.wu |= ts.vcu;
+                       } else {
+                               ls.w |= ts.vr;
+                               ls.wu |= ts.vru;
+                       }
+               }
+
+
+               Mutex SCLock;
+               /*!
+                * Update fence statement
+                *
+                * seq_cst fences are compiled to fence(acq); RMW(acq_rel); fence(rel);
+                */
+               void updateFenceStatement(ThreadId t, Thread &ts, Location &ls, morder mo){
+                       if (mo == mo_seq_cst){
+                               updateFenceStatement(t, ts, ls, mo_acquire);
+                               {
+                                       Lock instLock(&SCLock);
+                                       updateRmwStatement(t, LocationId(0), ts, ls, mo_acq_rel, 0);
+                               }
+                               updateFenceStatement(t, ts, ls, mo_release);
+                               return;
+                       }
+                       if (atLeast(mo, (mo_acquire))){
+                               ts.vc = ts.va;
+                               ts.vcu = ts.vau;
+                       }
+                       if (atLeast(mo, (mo_release))){
+                               ts.vr = ts.vc;
+                               ts.vru = ts.vcu;
+                       }
+               }
+
+               auto getLastTimeStamp(ThreadId t, LocationId l, Thread &ts, Location &ls){
+                       return ts.vc[l].ts;
+               }
+               auto getLastTimeStampU(ThreadId t, LocationId l, Thread &ts, Location &ls){
+                       return ts.vcu[l].ts;
+               }
+
+
+
+               //! Remove locations when freeing memory
+               void freeLocation(LocationId l, Location &ls){
+                       ls.w.reset();
+                       ls.wu.reset();
+               }
+       };
+
+
+       /// Instrumentation
+class Instrumentation{
+       public:
+       virtual void verifyLoadStatement(ThreadId t, LocationId l, morder mo) = 0;
+       virtual void verifyStoreStatement(ThreadId t, LocationId l, morder mo) = 0;
+       virtual void updateLoadStatement(Action::AtomicLoadAction a) = 0;
+       virtual void updateStoreStatement(Action::AtomicStoreAction a) = 0;
+       virtual void updateRmwStatement(Action::AtomicRMWAction a) = 0;
+       virtual void updateCasStatement(Action::AtomicCasAction a) = 0;
+       virtual void updateFenceStatement(ThreadId t, morder mo) = 0;
+       virtual void updateNALoad(ThreadId t, LocationId l) = 0;
+       virtual void updateNAStore(ThreadId t, LocationId l) = 0;
+
+       virtual void absorbThread(ThreadId _absorber, ThreadId _absorbee) = 0;
+       virtual void cloneThread(ThreadId _src, ThreadId dst) = 0;
+       //void initThread(ThreadId tid);
+       //void removeThread(ThreadId t);
+
+       virtual int64_t getViolationsCount() = 0;
+       virtual int64_t getRacesCount() = 0;
+       virtual void freeMemory(ThreadId t, LocationId l, ptrdiff_t size) = 0;
+
+       virtual void trackAtomic(ThreadId t, LocationId l, uint64_t val) = 0;
+       virtual void waitAtomic(Action::WaitAction a) = 0;
+       virtual void bcasAtomic(Action::BcasAction a) = 0;
+
+       virtual ~Instrumentation() = default;
+};
+
+} // namespace Robustness
+
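Both trackers above keep per-thread and per-location vector clocks and update them on loads, stores, RMWs and fences; a violation is reported when a thread accesses a location without "knowing" its latest write. For orientation only, an illustrative client program of the kind such a checker is meant to catch (not part of the patch): the relaxed flag load does not synchronize with the release store, so the reader can observe the flag without having acquired the writer's knowledge of data; loading the flag with memory_order_acquire restores the expected behaviour.

  #include <atomic>
  #include <cassert>
  #include <thread>

  int data = 0;
  std::atomic<int> flag{0};

  void producer() {
    data = 42;                                         // plain write
    flag.store(1, std::memory_order_release);          // publish
  }

  void consumer() {
    while (flag.load(std::memory_order_relaxed) == 0)  // weaker than the release store
      ;
    assert(data == 42);                                // not guaranteed without acquire
  }

  int main() {
    std::thread t1(producer), t2(consumer);
    t1.join();
    t2.join();
  }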
diff --git a/compiler-rt/lib/tsan/rtl/rsan_stacktrace.cpp b/compiler-rt/lib/tsan/rtl/rsan_stacktrace.cpp
new file mode 100644
index 0000000000000..6f6d20a4263a8
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_stacktrace.cpp
@@ -0,0 +1,134 @@
+//===-- rsan_stacktrace.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "rsan_stacktrace.hpp"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_ptrauth.h"
+
+namespace Robustness {
+       using __sanitizer::StackTrace;
+       using __sanitizer::uptr;
+
+
+void LittleStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+  size = cnt + !!extra_top_pc;
+  CHECK_LE(size, kStackTraceMax);
+  internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+  if (extra_top_pc)
+    trace_buffer[cnt] = extra_top_pc;
+  top_frame_bp = 0;
+}
+
+// Sparc implementation is in its own file.
+#if !defined(__sparc__)
+
+// In GCC on ARM bp points to saved lr, not fp, so we should check the next
+// cell in stack to be a saved frame pointer. GetCanonicFrame returns the
+// pointer to saved frame pointer in any case.
+static inline uhwptr *GetCanonicFrame(uptr bp,
+                                      uptr stack_top,
+                                      uptr stack_bottom) {
+  CHECK_GT(stack_top, stack_bottom);
+#ifdef __arm__
+  if (!IsValidFrame(bp, stack_top, stack_bottom)) return 0;
+  uhwptr *bp_prev = (uhwptr *)bp;
+  if (IsValidFrame((uptr)bp_prev[0], stack_top, stack_bottom)) return bp_prev;
+  // The next frame pointer does not look right. This could be a GCC frame, step
+  // back by 1 word and try again.
+  if (IsValidFrame((uptr)bp_prev[-1], stack_top, stack_bottom))
+    return bp_prev - 1;
+  // Nope, this does not look right either. This means the frame after next does
+  // not have a valid frame pointer, but we can still extract the caller PC.
+  // Unfortunately, there is no way to decide between GCC and LLVM frame
+  // layouts. Assume LLVM.
+  return bp_prev;
+#else
+  return (uhwptr*)bp;
+#endif
+}
+
+void LittleStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
+                                    uptr stack_bottom, u32 max_depth) {
+  // TODO(yln): add arg sanity check for stack_top/stack_bottom
+  CHECK_GE(max_depth, 2);
+  const uptr kPageSize = GetPageSizeCached();
+  trace_buffer[0] = pc;
+  size = 1;
+  if (stack_top < 4096) return;  // Sanity check for stack top.
+  uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom);
+  // Lowest possible address that makes sense as the next frame pointer.
+  // Goes up as we walk the stack.
+  uptr bottom = stack_bottom;
+  // Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
+  while (IsValidFrame((uptr)frame, stack_top, bottom) &&
+         IsAligned((uptr)frame, sizeof(*frame)) &&
+         size < max_depth) {
+#ifdef __powerpc__
+    // PowerPC ABIs specify that the return address is saved at offset
+    // 16 of the *caller's* stack frame.  Thus we must dereference the
+    // back chain to find the caller frame before extracting it.
+    uhwptr *caller_frame = (uhwptr*)frame[0];
+    if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
+        !IsAligned((uptr)caller_frame, sizeof(uhwptr)))
+      break;
+    uhwptr pc1 = caller_frame[2];
+#elif defined(__s390__)
+    uhwptr pc1 = frame[14];
+#elif defined(__loongarch__) || defined(__riscv)
+    // frame[-1] contains the return address
+    uhwptr pc1 = frame[-1];
+#else
+    uhwptr pc1 = STRIP_PAC_PC((void *)frame[1]);
+#endif
+    // Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and
+    // x86_64) is invalid and stop unwinding here.  If we're adding support for
+    // a platform where this isn't true, we need to reconsider this check.
+    if (pc1 < kPageSize)
+      break;
+    if (pc1 != pc) {
+      trace_buffer[size++] = (uptr) pc1;
+    }
+    bottom = (uptr)frame;
+#if defined(__loongarch__) || defined(__riscv)
+    // frame[-2] contain fp of the previous frame
+    uptr new_bp = (uptr)frame[-2];
+#else
+    uptr new_bp = (uptr)frame[0];
+#endif
+    frame = GetCanonicFrame(new_bp, stack_top, bottom);
+  }
+}
+
+#endif  // !defined(__sparc__)
+
+void LittleStackTrace::PopStackFrames(uptr count) {
+  CHECK_LT(count, size);
+  size -= count;
+  for (uptr i = 0; i < size; ++i) {
+    trace_buffer[i] = trace_buffer[i + count];
+  }
+}
+
+static uptr Distance(uptr a, uptr b) { return a < b ? b - a : a - b; }
+
+uptr LittleStackTrace::LocatePcInTrace(uptr pc) {
+  uptr best = 0;
+  for (uptr i = 1; i < size; ++i) {
+    if (Distance(trace[i], pc) < Distance(trace[best], pc)) best = i;
+  }
+  return best;
+}
+
+}  // namespace Robustness
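UnwindFast() above is a frame-pointer walk with per-architecture adjustments (ARM/PowerPC/s390/RISC-V frame offsets, PAC stripping, page-zero and alignment checks). A stripped-down restatement of the core loop, assuming the plain x86-64-style layout produced with frame pointers enabled and skipping every validity check except a depth bound; for orientation only:

  #include <cstddef>
  #include <cstdint>

  struct Frame {            // layout: saved caller frame pointer, then return address
    Frame *next;
    uintptr_t ret;
  };

  size_t walk(uintptr_t pc, Frame *fp, uintptr_t *out, size_t max_depth) {
    size_t n = 0;
    out[n++] = pc;                          // slot 0 holds the starting pc
    while (fp && fp->ret && n < max_depth) {
      out[n++] = fp->ret;                   // record the caller's pc
      fp = fp->next;                        // hop to the previous frame
    }
    return n;
  }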
diff --git a/compiler-rt/lib/tsan/rtl/rsan_stacktrace.hpp b/compiler-rt/lib/tsan/rtl/rsan_stacktrace.hpp
new file mode 100644
index 0000000000000..9b06f2cb79e66
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_stacktrace.hpp
@@ -0,0 +1,92 @@
+#pragma once
+
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "rsan_defs.hpp"
+
+namespace Robustness {
+       using __sanitizer::uhwptr;
+
+static const u32 kStackTraceMax = 4;
+
+// StackTrace that owns the buffer used to store the addresses.
+struct LittleStackTrace : public __sanitizer::StackTrace {
+  uptr trace_buffer[kStackTraceMax] = {};
+  uptr top_frame_bp;  // Optional bp of a top frame.
+
+  LittleStackTrace() : __sanitizer::StackTrace(trace_buffer, 0), top_frame_bp(0) {}
+
+  void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
+
+  // Get the stack trace with the given pc and bp.
+  // The pc will be in the position 0 of the resulting stack trace.
+  // The bp may refer to the current frame or to the caller's frame.
+  void Unwind(uptr pc, uptr bp, void *context, bool request_fast,
+              u32 max_depth = kStackTraceMax) {
+    top_frame_bp = (max_depth > 0) ? bp : 0;
+    // Small max_depth optimization
+    if (max_depth <= 1) {
+      if (max_depth == 1)
+        trace_buffer[0] = pc;
+      size = max_depth;
+      return;
+    }
+    UnwindImpl(pc, bp, context, request_fast, max_depth);
+  }
+
+  void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
+              uptr stack_bottom, bool request_fast_unwind);
+
+  void Reset() {
+    *static_cast<StackTrace *>(this) = StackTrace(trace_buffer, 0);
+    top_frame_bp = 0;
+  }
+
+  LittleStackTrace(const LittleStackTrace &rhs) : StackTrace(trace, 0) {
+         trace = trace_buffer;
+         size = rhs.size;
+         for (auto i = 0u; i < kStackTraceMax; ++i)
+                 trace_buffer[i] = rhs.trace_buffer[i];
+         top_frame_bp = rhs.top_frame_bp;
+  }
+  //void operator=(const LittleStackTrace &rhs) : StackTrace(trace, 0) {
+  //    trace = trace_buffer;
+  //    size = rhs.size;
+  //    for (auto i = 0u; i < kStackTraceMax; ++i)
+  //     trace_buffer[i] = rhs.trace_buffer[i];
+  //    top_frame_bp = rhs.top_frame_bp;
+  //}
+
+ private:
+  // Every runtime defines its own implementation of this method
+  void UnwindImpl(uptr pc, uptr bp, void *context, bool request_fast,
+                  u32 max_depth);
+
+  // UnwindFast/Slow have platform-specific implementations
+  void UnwindFast(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
+                  u32 max_depth);
+  void UnwindSlow(uptr pc, u32 max_depth);
+  void UnwindSlow(uptr pc, void *context, u32 max_depth);
+
+  void PopStackFrames(uptr count);
+  uptr LocatePcInTrace(uptr pc);
+
+
+  friend class FastUnwindTest;
+};
+
+#if defined(__s390x__)
+static const uptr kFrameSize = 160;
+#elif defined(__s390__)
+static const uptr kFrameSize = 96;
+#else
+static const uptr kFrameSize = 2 * sizeof(uhwptr);
+#endif
+
+// Check if given pointer points into allocated stack area.
+static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
+  return frame > stack_bottom && frame < stack_top - kFrameSize;
+}
+
+}  // namespace Robustness
+
diff --git a/compiler-rt/lib/tsan/rtl/rsan_vector.h b/compiler-rt/lib/tsan/rtl/rsan_vector.h
new file mode 100644
index 0000000000000..8e2a0764193b2
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_vector.h
@@ -0,0 +1,178 @@
+//===-- rsan_vector.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers run-time libraries.
+//
+//===----------------------------------------------------------------------===//
+
+// Low-fat STL-like vector container.
+
+#pragma once
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "rsan_defs.hpp"
+
+namespace Robustness {
+
+template<typename T>
+class Vector {
+ public:
+  Vector() : begin_(), end_(), last_() {}
+
+  ~Vector() {
+    if (begin_)
+      InternalFree(begin_);
+  }
+
+  void clear() {
+    if (begin_)
+      InternalFree(begin_);
+    begin_ = 0;
+    end_ = 0;
+    last_ = 0;
+  }
+
+  uptr size() const {
+    return end_ - begin_;
+  }
+
+  bool empty() const {
+    return end_ == begin_;
+  }
+
+  T &operator[](uptr i) {
+    DCHECK_LT(i, end_ - begin_);
+    return begin_[i];
+  }
+
+  const T &operator[](uptr i) const {
+    DCHECK_LT(i, end_ - begin_);
+    return begin_[i];
+  }
+
+  T *push_back() {
+    EnsureSize(size() + 1);
+    T *p = &end_[-1];
+    internal_memset(p, 0, sizeof(*p));
+    return p;
+  }
+
+  T *push_back(const T& v) {
+    EnsureSize(size() + 1);
+    T *p = &end_[-1];
+    internal_memcpy(p, &v, sizeof(*p));
+    return p;
+  }
+
+  T *insert(u64 i, const T& v) {
+    DCHECK_LE(i, end_ - begin_);
+    EnsureSize(size() + 1);
+       auto start = begin_ + i;
+       internal_memmove(start+1, start, ((end_-1) - start) * sizeof(T));
+    T *p = &begin_[i];
+    internal_memcpy(p, &v, sizeof(*p));
+    return p;
+  }
+
+  void pop_back() {
+    DCHECK_GT(end_, begin_);
+    end_--;
+  }
+
+  void resize(uptr size_) {
+    uptr old_size = size();
+    if (size_ <= old_size) {
+      end_ = begin_ + size_;
+      return;
+    }
+    EnsureSize(size_);
+       if (size_ > old_size)
+               internal_memset(&begin_[old_size], 0,
+                               sizeof(T) * (size_ - old_size));
+  }
+
+  void ensureSize(uptr size_){
+         auto oldSize = size();
+         EnsureSize(size_);
+         if (size_ > oldSize)
+                 internal_memset(&begin_[oldSize], 0,
+                                 sizeof(T) * (size_ - oldSize));
+  }
+
+  Vector& operator=(const Vector &w){
+         resize(w.size());
+         internal_memcpy(begin_, w.begin_, w.size()* sizeof(T));
+         return *this;
+  }
+
+  T* begin() const{
+    return begin_;
+  }
+  T* end() const{
+    return end_;
+  }
+  const T* cbegin() const{
+    return begin_;
+  }
+  const T* cend() const{
+    return end_;
+  }
+
+  void reserve(uptr size_){
+    if (size_ <= (uptr)(last_ - begin_)) {
+      return;
+    }
+       uptr oldSize = end_ - begin_;
+    uptr cap0 = last_ - begin_;
+    uptr cap = cap0 * 5 / 4;  // 25% growth
+    if (cap == 0)
+      cap = 16;
+    if (cap < size_)
+      cap = size_;
+    T *p = (T*)InternalAlloc(cap * sizeof(T));
+    if (cap0) {
+      internal_memcpy(p, begin_, oldSize * sizeof(T));
+      InternalFree(begin_);
+    }
+    begin_ = p;
+    end_ = begin_ + oldSize;
+    last_ = begin_ + cap;
+  }
+
+ private:
+  T *begin_;
+  T *end_;
+  T *last_;
+
+  void EnsureSize(uptr size_) {
+    if (size_ <= size())
+      return;
+    if (size_ <= (uptr)(last_ - begin_)) {
+      end_ = begin_ + size_;
+      return;
+    }
+    uptr cap0 = last_ - begin_;
+    uptr cap = cap0 * 5 / 4;  // 25% growth
+    if (cap == 0)
+      cap = 16;
+    if (cap < size_)
+      cap = size_;
+    T *p = (T*)InternalAlloc(cap * sizeof(T));
+    if (cap0) {
+      internal_memcpy(p, begin_, cap0 * sizeof(T));
+      InternalFree(begin_);
+    }
+    begin_ = p;
+    end_ = begin_ + size_;
+    last_ = begin_ + cap;
+  }
+
+  //Vector(const Vector&);
+};
+}  // namespace Robustness
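Vector grows by 25% with a minimum capacity of 16 and moves elements with memcpy/memmove, so it is only suitable for trivially copyable element types. The growth arithmetic from EnsureSize()/reserve(), reproduced standalone so the resulting capacity sequence is easy to check:

  #include <cstdio>

  int main() {
    unsigned long cap = 0;
    for (unsigned long need = 1; need <= 200; ++need) {
      if (need > cap) {                      // same condition as EnsureSize()
        unsigned long grown = cap * 5 / 4;   // 25% growth, integer division
        if (grown == 0) grown = 16;          // initial capacity
        cap = grown < need ? need : grown;
      }
    }
    // Pushing one element at a time the capacities are
    // 16, 20, 25, 31, 38, 47, 58, 72, 90, 112, 140, 175, 218, ...
    std::printf("capacity after 200 pushes: %lu\n", cap);
  }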
diff --git a/compiler-rt/lib/tsan/rtl/rsan_vectorclock.hpp b/compiler-rt/lib/tsan/rtl/rsan_vectorclock.hpp
new file mode 100644
index 0000000000000..0285574c3f95e
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/rsan_vectorclock.hpp
@@ -0,0 +1,115 @@
+#pragma once
+#include "rsan_defs.hpp"
+#include "rsan_vector.h"
+
+namespace Robustness{
+
+template <typename T> class MiniMapClock;
+
+/**
+ * Timestamp
+ */
+template <typename T>
+class Timestamp{
+       public:
+       T key{};
+       timestamp_t ts = 0;
+
+       /// Check if the timestamp is newer than rhs
+       public:
+       bool contains(Timestamp<T> rhs) const{
+               return key == rhs.key && ts >= rhs.ts;
+       }
+       //auto operator<=>(const Timestamp<T>&) const = default;
+};
+       template <typename T> inline bool operator< (const Timestamp<T>& lhs, const Timestamp<T>& rhs) { return lhs.key < rhs.key ? true : lhs.key == rhs.key ? lhs.ts < rhs.ts : false; }
+       template <typename T> inline bool operator==(const Timestamp<T>& lhs, const Timestamp<T>& rhs) { return lhs.key == rhs.key && lhs.ts == rhs.ts; }
+       template <typename T> inline bool operator> (const Timestamp<T>& lhs, const Timestamp<T>& rhs) { return rhs < lhs; }
+       template <typename T> inline bool operator<=(const Timestamp<T>& lhs, const Timestamp<T>& rhs) { return !(lhs > rhs); }
+       template <typename T> inline bool operator>=(const Timestamp<T>& lhs, const Timestamp<T>& rhs) { return !(lhs < rhs); }
+       template <typename T> inline bool operator!=(const Timestamp<T>& lhs, const Timestamp<T>& rhs) { return !(lhs == rhs); }
+
+template <typename T>
+auto timestamp(T key, timestamp_t ts){
+       return Timestamp<T>{key, ts};
+}
+
+/**
+  Vector Clock
+  **/
+
+template<class T> struct remove_reference { typedef T type; };
+template<class T> struct remove_reference<T&> { typedef T type; };
+template<class T> struct remove_reference<T&&> { typedef T type; };
+
+class VectorClock {
+       private:
+               Robustness::Vector<timestamp_t> impl;
+
+       public:
+               /// Increment a timestamp t in the vector
+               auto inc(LocationId t){
+                       impl.ensureSize(t+1);
+                       timestamp(t, ++impl[t]);
+               }
+               /// Reset the vector clock
+               void reset(){
+                       impl.clear();
+               }
+               void receive(Timestamp<LocationId> ts){
+                       const auto loc = ts.key;
+                       impl.ensureSize(loc+1);
+                       impl[loc] = max(impl[loc], ts.ts);
+               }
+
+               /**
+                 Support
+                 |= Union
+                **/
+               VectorClock& operator|=(const VectorClock &rhs){
+                       auto S1 = impl.size();
+                       auto S2 = rhs.impl.size();
+                       impl.ensureSize(S2);
+                       auto S = min(S1,S2);
+                       uptr i = 0;
+                       for (i = 0; i < S; ++i){
+                               impl[i] = max(impl[i], rhs.impl[i]);
+                       }
+                       for (i = S; i < S2; ++i){
+                               impl[i] = rhs.impl[i];
+                       }
+                       return *this;
+               }
+
+
+               /**
+                 |= - add a timestamp
+                **/
+               auto& operator|=(const Timestamp<LocationId> &rhs){
+                       receive(rhs);
+                       return *this;
+               }
+               bool contains(const VectorClock &rhs) const{
+                       auto S1 = impl.size(), S2 = rhs.impl.size();
+                       decltype(S1) i = 0;
+                       for (; i < S1 && i < S2; ++i)
+                               if (impl[i] < rhs.impl[i])
+                                       return false;
+                       for (; i < S2; ++i)
+                               if (rhs.impl[i] > 0)
+                                       return false;
+                       return true;
+               }
+
+               auto operator[](LocationId t) const {
+                       if (t < impl.size()) {
+                               return timestamp(t, impl[t]);
+                       }
+                       return timestamp(t, 0);
+               }
+
+               bool contains(const Timestamp<LocationId> &rhs) const{
+                       return operator[](rhs.key) >= rhs;
+               }
+};
+} // namespace Robustness
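VectorClock stores one timestamp per LocationId in a dense vector: |= with another clock is a pointwise maximum, |= with a single Timestamp raises one entry, and contains() checks pointwise dominance. The same algebra restated with std::vector as a simplified, checkable stand-in (not the patch's class):

  #include <algorithm>
  #include <cassert>
  #include <cstdint>
  #include <vector>

  using Clock = std::vector<uint64_t>;   // index = location id, value = timestamp

  void join(Clock &lhs, const Clock &rhs) {            // lhs |= rhs
    if (rhs.size() > lhs.size()) lhs.resize(rhs.size(), 0);
    for (size_t i = 0; i < rhs.size(); ++i) lhs[i] = std::max(lhs[i], rhs[i]);
  }

  bool contains(const Clock &lhs, const Clock &rhs) {  // does lhs dominate rhs?
    for (size_t i = 0; i < rhs.size(); ++i)
      if (rhs[i] > (i < lhs.size() ? lhs[i] : 0)) return false;
    return true;
  }

  int main() {
    Clock a{3, 0, 1}, b{1, 2};
    join(a, b);                  // a becomes {3, 2, 1}
    assert(contains(a, b));      // after the join a covers b
    assert(!contains(b, a));     // but b does not cover a
  }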
diff --git a/compiler-rt/lib/tsan/rtl/tsan_flags.inc b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
index 731d776cc893e..9da7a156eddc8 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_flags.inc
+++ b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
@@ -16,6 +16,9 @@
 // TSAN_FLAG(Type, Name, DefaultValue, Description)
 // See COMMON_FLAG in sanitizer_flags.inc for more details.
 
+TSAN_FLAG(bool, enable_robustness, false,
+          "Enable robustness verification.")
+
 TSAN_FLAG(bool, enable_annotations, true,
           "Enable dynamic annotations, otherwise they are no-ops.")
 // Suppress a race report if we've already output another race report
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index 527e5a9b4a8d8..2c4fe9a7b4b6b 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -25,6 +25,8 @@
 #include "tsan_interface.h"
 #include "tsan_rtl.h"
 
+#include "rsan_instrument.hpp"
+
 using namespace __tsan;
 
 #if !SANITIZER_GO && __TSAN_HAS_INT128
@@ -226,9 +228,16 @@ namespace {
 
 template <typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+  Lock instLock(Robustness::ins.getLockForAddr((Robustness::Address(a))));
+  Robustness::DebugInfo dbg = { .thr = thr, .pc = pc };
+  auto oldValue = *a;
   MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | 
kAccessAtomic);
-  if (LIKELY(mo == mo_relaxed))
-    return F(a, v);
+  if (LIKELY(mo == mo_relaxed)) {
+    auto newValue = F(a, v);
+       if (Robustness::isRobustness())
+               
Robustness::ins.updateRmwStatement(Robustness::Action::AtomicRMWAction{.tid = 
thr->tid, .addr = (Robustness::Address(a)), .mo = mo, .size = AccessSize<T>(), 
.oldValue = static_cast<u64>(oldValue), .newValue=static_cast<u64>(newValue), 
.dbg = move(dbg)});
+    return newValue;
+  }
   SlotLocker locker(thr);
   {
     auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
@@ -241,6 +250,8 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
       thr->clock.Acquire(s->clock);
     v = F(a, v);
   }
+  if (Robustness::isRobustness())
+         
Robustness::ins.updateRmwStatement(Robustness::Action::AtomicRMWAction{.tid = 
thr->tid, .addr = (Robustness::Address(a)), .mo = mo, .size = AccessSize<T>(), 
.oldValue = static_cast<u64>(oldValue), .newValue=static_cast<u64>(v), .dbg = 
move(dbg)});
   if (IsReleaseOrder(mo))
     IncrementEpoch(thr);
   return v;
@@ -262,6 +273,10 @@ struct OpLoad {
   template <typename T>
   static T Atomic(ThreadState *thr, uptr pc, morder mo, const volatile T *a) {
     DCHECK(IsLoadOrder(mo));
+    Lock instLock(Robustness::ins.getLockForAddr((Robustness::Address(a))));
+    Robustness::DebugInfo dbg = { .thr = thr, .pc = pc };
+       if (Robustness::isRobustness())
+               
Robustness::ins.updateLoadStatement(Robustness::Action::AtomicLoadAction{.tid = 
thr->tid, .addr = (Robustness::Address(a)), .mo = mo, .size = AccessSize<T>(), 
.rmw = false, .dbg = move(dbg)});
     // This fast-path is critical for performance.
     // Assume the access is atomic.
     if (!IsAcquireOrder(mo)) {
@@ -303,6 +318,10 @@ struct OpStore {
   template <typename T>
   static void Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
     DCHECK(IsStoreOrder(mo));
+    Lock instLock(Robustness::ins.getLockForAddr((Robustness::Address(a))));
+    Robustness::DebugInfo dbg = { .thr = thr, .pc = pc };
+       if (Robustness::isRobustness())
+               
Robustness::ins.updateStoreStatement(Robustness::Action::AtomicStoreAction{.tid 
= thr->tid, .addr = (Robustness::Address(a)), .mo = mo, .size = 
AccessSize<T>(), .oldValue = static_cast<u64>(*a), .newValue = 
static_cast<u64>(v), .dbg = move(dbg)});
     MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                  kAccessWrite | kAccessAtomic);
     // This fast-path is critical for performance.
@@ -438,39 +457,65 @@ struct OpCAS {
     // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic
     // (mo_relaxed) when those are used.
     DCHECK(IsLoadOrder(fmo));
+    Lock instLock(Robustness::ins.getLockForAddr((Robustness::Address(a))));
+    Robustness::DebugInfo dbg = { .thr = thr, .pc = pc };
 
     MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                  kAccessWrite | kAccessAtomic);
+
+    bool success;
+    bool release = IsReleaseOrder(mo);
+    T cc;
+    T pr;
     if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
-      T cc = *c;
-      T pr = func_cas(a, cc, v);
-      if (pr == cc)
-        return true;
+      //T cc = *c;
+      //T pr = func_cas(a, cc, v);
+      cc = *c;
+      pr = func_cas(a, cc, v);
+      if (pr == cc) {
+        success = true;
+               goto cleanup;
+               // return true;
+         }
       *c = pr;
+         success = false;
+         goto cleanup;
       return false;
     }
-    SlotLocker locker(thr);
-    bool release = IsReleaseOrder(mo);
-    bool success;
-    {
-      auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
-      RWLock lock(&s->mtx, release);
-      T cc = *c;
-      T pr = func_cas(a, cc, v);
-      success = pr == cc;
-      if (!success) {
-        *c = pr;
-        mo = fmo;
+       {
+      SlotLocker locker(thr);
+      // bool release = IsReleaseOrder(mo);
+      {
+        auto *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+        RWLock lock(&s->mtx, release);
+        // T cc = *c;
+        // T pr = func_cas(a, cc, v);
+        cc = *c;
+        pr = func_cas(a, cc, v);
+        success = pr == cc;
+        if (!success) {
+          *c = pr;
+          mo = fmo;
+        }
+        if (success && IsAcqRelOrder(mo))
+          thr->clock.ReleaseAcquire(&s->clock);
+        else if (success && IsReleaseOrder(mo))
+          thr->clock.Release(&s->clock);
+        else if (IsAcquireOrder(mo))
+          thr->clock.Acquire(s->clock);
       }
-      if (success && IsAcqRelOrder(mo))
-        thr->clock.ReleaseAcquire(&s->clock);
-      else if (success && IsReleaseOrder(mo))
-        thr->clock.Release(&s->clock);
-      else if (IsAcquireOrder(mo))
-        thr->clock.Acquire(s->clock);
-    }
-    if (success && release)
-      IncrementEpoch(thr);
+      if (success && release)
+        IncrementEpoch(thr);
+       }
+       cleanup:
+       morder correctmo;
+       if (success){
+               correctmo = mo;
+       } else {
+               correctmo = fmo;
+       }
+       if (Robustness::isRobustness())
+               
Robustness::ins.updateCasStatement(Robustness::Action::AtomicCasAction{.tid = 
thr->tid, .addr = (Robustness::Address(a)), .mo = correctmo, .size = 
AccessSize<T>(), .oldValue = static_cast<u64>(cc), .newValue = 
static_cast<u64>(v), .success = success, .dbg = dbg});
     return success;
   }
 
@@ -488,6 +533,14 @@ struct OpFence {
 
   static void Atomic(ThreadState *thr, uptr pc, morder mo) {
     // FIXME(dvyukov): not implemented.
+       if (Robustness::isRobustness()) {
+               if (mo == mo_seq_cst){
+                       (void) 
Robustness::ins.getLockForAddr(Robustness::Address(0)); // Call for side effect
+                       Robustness::ins.updateFenceStatement(thr->tid, mo);
+               } else {
+                       Robustness::ins.updateFenceStatement(thr->tid, mo);
+               }
+       }
     __sync_synchronize();
   }
 };
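The hunks above follow one pattern: take the per-address robustness lock, snapshot DebugInfo (thr, pc), perform the real atomic operation, then, if robustness checking is enabled, forward a plain-data action record (tid, address, memory order, old/new value) to the checker. A schematic restatement of that pattern with simplified stand-in types, not the sanitizer's API:

  #include <cstdint>
  #include <mutex>

  struct AtomicStoreAction {             // plain-data event record
    int tid;
    uintptr_t addr;
    int mo;
    uint64_t oldValue, newValue;
  };

  struct Checker {
    std::mutex perAddrLock;              // stands in for getLockForAddr(addr)
    bool enabled = true;                 // stands in for isRobustness()
    void updateStoreStatement(const AtomicStoreAction &) { /* model update */ }
  };

  Checker gChecker;

  void instrumentedStore(int tid, volatile uint64_t *a, uint64_t v, int mo) {
    std::lock_guard<std::mutex> g(gChecker.perAddrLock);
    uint64_t old = *a;                   // snapshot before the real store
    *a = v;                              // the real (here simplified) store
    if (gChecker.enabled)
      gChecker.updateStoreStatement({tid, (uintptr_t)a, mo, old, v});
  }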
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 0ea83fb3b5982..29e531e08bdbc 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -23,6 +23,8 @@
 #include "tsan_report.h"
 #include "tsan_rtl.h"
 
+#include "rsan_instrument.hpp"
+
 namespace __tsan {
 
 struct MapUnmapCallback {
@@ -276,13 +278,19 @@ void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
 }
 
 void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
+ Robustness::DebugInfo dbg = { .thr = thr, .pc = pc };
   CHECK_NE(p, (void*)0);
   if (!thr->slot) {
     // Very early/late in thread lifetime, or during fork.
     UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
+       if (Robustness::isRobustness())
+         Robustness::ins.freeMemory(Robustness::Action::Free{.tid = thr->tid, 
.addr = p, .size = sz, .dbg = dbg});
     DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
     return;
   }
+  uptr size = ctx->metamap.FreeBlock(thr->proc(), p, true);
+  if (Robustness::isRobustness())
+    Robustness::ins.freeMemory(Robustness::Action::Free{.tid = thr->tid, .addr 
= p, .size = size, .dbg = dbg});
   SlotLocker locker(thr);
   uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
   DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index 2a8aa1915c9ae..9145060e1d985 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -20,6 +20,8 @@
 #include "tsan_symbolize.h"
 #include "tsan_platform.h"
 
+#include "rsan_instrument.hpp"
+
 namespace __tsan {
 
 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
@@ -156,6 +158,9 @@ void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
 }
 
 void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
+  Lock instLock(Robustness::ins.getLockForAddr((Robustness::LocationId) addr));
+  Robustness::DebugInfo dbg = { .thr = thr, .pc = pc };
+ // Note: We treat Mutex as atomic release/acquire var for robustness
   DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
       thr->tid, addr, flagz, rec);
   if (flagz & MutexFlagRecursiveLock)
@@ -204,6 +209,8 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
       }
     }
   }
+  if (Robustness::isRobustness())
+         
Robustness::ins.updateLoadStatement(Robustness::Action::AtomicLoadAction{.tid = 
thr->tid, .addr = (Robustness::Address(addr)), .mo = mo_acquire, .rmw = false, 
.dbg = dbg});
   if (report_double_lock)
     ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr,
                       creation_stack_id);
@@ -214,6 +221,8 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
 }
 
 int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+  Lock instLock(Robustness::ins.getLockForAddr((Robustness::LocationId) addr));
+  Robustness::DebugInfo dbg = { .thr = thr, .pc = pc };
   DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (pc && IsAppMem(addr))
     MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
@@ -221,6 +230,8 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   RecordMutexUnlock(thr, addr);
   bool report_bad_unlock = false;
   int rec = 0;
+  if (Robustness::isRobustness())
+         
Robustness::ins.updateStoreStatement(Robustness::Action::AtomicStoreAction{.tid 
= thr->tid, .addr = (Robustness::Address(addr)), .mo = mo_release, .oldValue = 
0, .dbg = dbg});
   {
     SlotLocker locker(thr);
     auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);

>From 19ba2590d72bd9dc21a30ad49d84f4efdf951f6a Mon Sep 17 00:00:00 2001
From: rymrg <54061433+ry...@users.noreply.github.com>
Date: Tue, 24 Jun 2025 19:01:10 +0300
Subject: [PATCH 2/2] RSan: Use TSan epoch

---
 compiler-rt/lib/tsan/rtl/rsan_defs.hpp        |  8 ++++++-
 .../lib/tsan/rtl/rsan_robustnessmodel.hpp     | 24 +++++++------------
 compiler-rt/lib/tsan/rtl/rsan_vectorclock.hpp |  8 +++----
 3 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/compiler-rt/lib/tsan/rtl/rsan_defs.hpp b/compiler-rt/lib/tsan/rtl/rsan_defs.hpp
index c5ca506865090..6b11897ce9a76 100644
--- a/compiler-rt/lib/tsan/rtl/rsan_defs.hpp
+++ b/compiler-rt/lib/tsan/rtl/rsan_defs.hpp
@@ -16,7 +16,13 @@ namespace Robustness{
        using __tsan::s64;
        using __tsan::u64;
        using __tsan::uptr;
-       typedef s64 timestamp_t;
+       using __tsan::Epoch;
+       using __tsan::EpochInc;
+       using __tsan::EpochOverflow;
+       using __tsan::kEpochZero;
+       using __tsan::kEpochOver;
+       using __tsan::kEpochLast;
+       typedef __tsan::Epoch timestamp_t;
        typedef s64 ssize_t;
        typedef u64 uint64_t;
        typedef s64 int64_t;
diff --git a/compiler-rt/lib/tsan/rtl/rsan_robustnessmodel.hpp b/compiler-rt/lib/tsan/rtl/rsan_robustnessmodel.hpp
index 4aa0eaa8c9120..d7bc02ff9c745 100644
--- a/compiler-rt/lib/tsan/rtl/rsan_robustnessmodel.hpp
+++ b/compiler-rt/lib/tsan/rtl/rsan_robustnessmodel.hpp
@@ -28,8 +28,8 @@ namespace Robustness {
                };
                //! Location component
                struct Location{
-                       timestamp_t stamp = 0;
-                       timestamp_t stampu = 0;
+                       timestamp_t stamp = kEpochZero;
+                       timestamp_t stampu = kEpochZero;
                        VectorClock m, w;
                        VectorClock mu, wu;
                };
@@ -53,12 +53,12 @@ namespace Robustness {
                 * Memory order is ignored for SC
                 */
                void updateStoreStatement(ThreadId , LocationId a, Thread &ts, 
Location &ls, morder, u64 val){
-                       ls.m |= timestamp(a, ++ls.stamp);
+                       ls.m |= timestamp(a, EpochInc(ls.stamp));
                        ts.v |= ls.m;
                        ls.w = ts.v;
                        ls.m = ts.v;
 
-                       ls.mu |= timestamp(a, ++ls.stampu);
+                       ls.mu |= timestamp(a, EpochInc(ls.stampu));
                        ts.vu |= ls.mu;
                        ls.wu = ts.vu;
                        ls.mu = ts.vu;
@@ -71,7 +71,7 @@ namespace Robustness {
                 */
                void updateRmwStatement(ThreadId t, LocationId a, Thread &ts, 
Location &ls, morder mo, u64 val){
                        //return updateStoreStatement(t, a, ts, ls, mo);
-                       ls.m |= timestamp(a, ++ls.stamp);
+                       ls.m |= timestamp(a, EpochInc(ls.stamp));
                        ts.v |= ls.m;
                        ls.w = ts.v;
                        ls.m = ts.v;
@@ -91,12 +91,6 @@ namespace Robustness {
                timestamp_t getLastTimeStampU(ThreadId , LocationId l, Thread &ts, Location &ls) const{
                        return ts.vu[l].ts;
                }
-               timestamp_t getLastTimeStampV(ThreadId , LocationId l, Thread &ts, Location &ls, u64 val) const{
-                       return ts.v[l].ts - 1;
-               }
-               timestamp_t getLastTimeStampUV(ThreadId , LocationId l, Thread &ts, Location &ls, u64 val) const{
-                       return ts.vu[l].ts - 1;
-               }
 
                //! Remove locations when freeing memory
                void freeLocation(LocationId l, Location &ls){
@@ -138,7 +132,7 @@ namespace Robustness {
                };
                //! Location component
                struct Location{
-                       timestamp_t writeStamp = 0, writeStampU = 0;
+                       timestamp_t writeStamp = kEpochZero, writeStampU = kEpochZero;
                        VectorClock w;
                        VectorClock wu;
                };
@@ -162,8 +156,8 @@ namespace Robustness {
 
                //! Update store statement
                void updateStoreStatement(ThreadId t, LocationId a, Thread &ts, Location &ls, morder mo, uint64_t oldValue){
-                       const auto timestampV =  timestamp(a, ++ls.writeStamp);
-                       const auto timestampVU = timestamp(a, ++ls.writeStampU);
+                       const auto timestampV =  timestamp(a, EpochInc(ls.writeStamp));
+                       const auto timestampVU = timestamp(a, EpochInc(ls.writeStampU));
                        ls.w  |= timestampV;
                        ls.wu |= timestampVU;
                        ts.va |= timestampV;
@@ -185,7 +179,7 @@ namespace Robustness {
 
                //! Update RMW statement
                void updateRmwStatement(ThreadId t, LocationId a, Thread &ts, Location &ls, morder mo, uint64_t oldValue){
-                       const auto timestampV =  timestamp(a, ++ls.writeStamp);
+                       const auto timestampV =  timestamp(a, EpochInc(ls.writeStamp));
                        ls.w  |= timestampV;
                        ts.va |= timestampV;
                        ts.vc |= timestampV;
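A side effect of moving from a 64-bit counter to the much narrower Epoch is that a per-location stamp can, in principle, run out of epochs. How (or whether) RSan handles that is not visible in this patch; purely as an assumption, a defensive wrapper around the increment could look like this:

// Assumed defensive pattern, not taken from the patch: stop advancing a stamp
// once the epoch space is exhausted instead of silently wrapping.
#include <cstdint>

enum class Epoch : uint16_t {};
constexpr Epoch kEpochOver = static_cast<Epoch>(1u << 14);  // assumed overflow marker

inline Epoch EpochInc(Epoch e) { return static_cast<Epoch>(static_cast<uint16_t>(e) + 1); }
inline bool EpochOverflow(Epoch e) { return e == kEpochOver; }

// Returns false and leaves the stamp untouched when no epoch is left;
// the caller could then reset its clocks or disable the robustness check.
inline bool tryBumpStamp(Epoch &stamp) {
  Epoch next = EpochInc(stamp);
  if (EpochOverflow(next))
    return false;
  stamp = next;
  return true;
}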
diff --git a/compiler-rt/lib/tsan/rtl/rsan_vectorclock.hpp b/compiler-rt/lib/tsan/rtl/rsan_vectorclock.hpp
index 0285574c3f95e..b34ec789080b1 100644
--- a/compiler-rt/lib/tsan/rtl/rsan_vectorclock.hpp
+++ b/compiler-rt/lib/tsan/rtl/rsan_vectorclock.hpp
@@ -13,7 +13,7 @@ template <typename T>
 class Timestamp{
        public:
        T key{};
-       timestamp_t ts = 0;
+       timestamp_t ts = kEpochZero;
 
        /// Check if the timestamp is newer than rhs
        public:
@@ -50,7 +50,7 @@ class VectorClock {
                /// Increment a timestamp t in the vector
                auto inc(LocationId t){
                        impl.ensureSize(t+1);
-                       timestamp(t, ++impl[t]);
+                       timestamp(t, EpochInc(impl[t]));
                }
                /// Reset the vector clock
                void reset(){
@@ -96,7 +96,7 @@ class VectorClock {
                                if (impl[i] < rhs.impl[i])
                                        return false;
                        for (; i < S2; ++i)
-                               if (rhs.impl[i] > 0)
+                               if (rhs.impl[i] > kEpochZero)
                                        return false;
                        return true;
                }
@@ -105,7 +105,7 @@ class VectorClock {
                        if (t < impl.size()) {
                                return timestamp(t, impl[t]);
                        }
-                       return timestamp(t, 0);
+                       return timestamp(t, kEpochZero);
                }
 
                bool contains(const Timestamp<LocationId> &rhs) const{
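The vector-clock hunks are the same substitution: default and missing entries now read as kEpochZero and increments go through EpochInc. In the comparison, entries beyond the shorter clock only break the componentwise >= relation where the right-hand side actually holds a nonzero stamp. A simplified rendering of that check over plain vectors, with an integer in place of Epoch:

// Simplified version of the componentwise comparison in the diff above
// (illustrative; the real clock stores Timestamp entries keyed by LocationId).
#include <cstddef>
#include <cstdint>
#include <vector>

// True iff lhs[i] >= rhs[i] for all i, where missing entries count as zero.
bool dominates(const std::vector<uint64_t> &lhs, const std::vector<uint64_t> &rhs) {
  std::size_t i = 0;
  for (; i < lhs.size() && i < rhs.size(); ++i)
    if (lhs[i] < rhs[i])
      return false;
  for (; i < rhs.size(); ++i)  // rhs is longer: every extra entry must be zero
    if (rhs[i] > 0)            // mirrors `rhs.impl[i] > kEpochZero`
      return false;
  return true;
}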
