https://github.com/Lancern updated https://github.com/llvm/llvm-project/pull/153814

>From aba98c654053fc43e5470f360ee3f0713bb8236e Mon Sep 17 00:00:00 2001
From: Sirui Mu <msrlanc...@gmail.com>
Date: Fri, 15 Aug 2025 22:32:52 +0800
Subject: [PATCH] [CIR] Add atomic load and store

This patch adds support for atomic loads and stores. Specifically, it adds
support for the following builtin calls:

- `__atomic_load` and `__atomic_store`;
- `__c11_atomic_load` and `__c11_atomic_store`.
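
The two flavors differ in how values are passed: the `__atomic_*` builtins
take the value and result by pointer, while the `__c11_atomic_*` builtins
operate on `_Atomic` objects and pass or return values directly. A minimal C
sketch of the difference (identifier names are illustrative):

    int obj, v, r;
    __atomic_load(&obj, &r, __ATOMIC_ACQUIRE);   // result written through &r
    __atomic_store(&obj, &v, __ATOMIC_RELEASE);  // value read through &v

    _Atomic(int) aobj;
    r = __c11_atomic_load(&aobj, __ATOMIC_ACQUIRE);  // result returned directly
    __c11_atomic_store(&aobj, v, __ATOMIC_RELEASE);  // value passed by value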
---
 .../CIR/Dialect/Builder/CIRBaseBuilder.h      |  12 +-
 clang/include/clang/CIR/Dialect/IR/CIROps.td  |  23 +-
 .../clang/CIR/Dialect/IR/CIROpsEnums.h        |  12 +
 clang/include/clang/CIR/MissingFeatures.h     |   3 +-
 clang/lib/CIR/CodeGen/Address.h               |   6 +
 clang/lib/CIR/CodeGen/CIRGenAtomic.cpp        | 225 +++++++++++++++++-
 clang/lib/CIR/CodeGen/CIRGenBuilder.h         |  12 +-
 clang/lib/CIR/CodeGen/CIRGenFunction.h        |   2 +-
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp |  42 +++-
 clang/test/CIR/CodeGen/atomic.c               | 106 +++++++++
 10 files changed, 415 insertions(+), 28 deletions(-)

diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index 1def6457e156c..693e44cbd0979 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -161,9 +161,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
                          uint64_t alignment = 0) {
     mlir::IntegerAttr alignmentAttr = getAlignmentAttr(alignment);
     assert(!cir::MissingFeatures::opLoadStoreVolatile());
-    assert(!cir::MissingFeatures::opLoadStoreMemOrder());
     return cir::LoadOp::create(*this, loc, ptr, /*isDeref=*/false,
-                               alignmentAttr);
+                               alignmentAttr, cir::MemOrderAttr{});
   }
 
   mlir::Value createAlignedLoad(mlir::Location loc, mlir::Value ptr,
@@ -245,8 +244,10 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
   }
 
   cir::StoreOp createStore(mlir::Location loc, mlir::Value val, mlir::Value dst,
-                           mlir::IntegerAttr align = {}) {
-    return create<cir::StoreOp>(loc, val, dst, align);
+                           bool isVolatile = false,
+                           mlir::IntegerAttr align = {},
+                           cir::MemOrderAttr order = {}) {
+    return cir::StoreOp::create(*this, loc, val, dst, align, order);
   }
 
   [[nodiscard]] cir::GlobalOp createGlobal(mlir::ModuleOp mlirModule,
@@ -269,7 +270,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
                                clang::CharUnits alignment) {
     mlir::IntegerAttr alignmentAttr = getAlignmentAttr(alignment);
     auto addr = createAlloca(loc, getPointerTo(type), type, {}, alignmentAttr);
-    return create<cir::LoadOp>(loc, addr, /*isDeref=*/false, alignmentAttr);
+    return cir::LoadOp::create(*this, loc, addr, /*isDeref=*/false,
+                               alignmentAttr, /*mem_order=*/{});
   }
 
   cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base,
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 369bcb1ddb1bb..d7628f9d7deea 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -299,6 +299,20 @@ def CIR_ConstantOp : CIR_Op<"const", [
   let hasFolder = 1;
 }
 
+//===----------------------------------------------------------------------===//
+// C/C++ memory order definitions
+//===----------------------------------------------------------------------===//
+
+def CIR_MemOrder : CIR_I32EnumAttr<
+  "MemOrder", "Memory order according to C++11 memory model", [
+    I32EnumAttrCase<"Relaxed", 0, "relaxed">,
+    I32EnumAttrCase<"Consume", 1, "consume">,
+    I32EnumAttrCase<"Acquire", 2, "acquire">,
+    I32EnumAttrCase<"Release", 3, "release">,
+    I32EnumAttrCase<"AcquireRelease", 4, "acq_rel">,
+    I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">
+]>;
+
 //===----------------------------------------------------------------------===//
 // AllocaOp
 //===----------------------------------------------------------------------===//
@@ -408,13 +422,14 @@ def CIR_LoadOp : CIR_Op<"load", [
   let arguments = (ins Arg<CIR_PointerType, "the address to load from",
                            [MemRead]>:$addr,
                        UnitAttr:$isDeref,
-                       OptionalAttr<I64Attr>:$alignment
-                       );
+                       OptionalAttr<I64Attr>:$alignment,
+                       OptionalAttr<CIR_MemOrder>:$mem_order);
   let results = (outs CIR_AnyType:$result);
 
   let assemblyFormat = [{
     (`deref` $isDeref^)?
     (`align` `(` $alignment^ `)`)?
+    (`atomic` `(` $mem_order^ `)`)?
     $addr `:` qualified(type($addr)) `,` type($result) attr-dict
   }];
 
@@ -451,10 +466,12 @@ def CIR_StoreOp : CIR_Op<"store", [
   let arguments = (ins CIR_AnyType:$value,
                        Arg<CIR_PointerType, "the address to store the value",
                            [MemWrite]>:$addr,
-                           OptionalAttr<I64Attr>:$alignment);
+                       OptionalAttr<I64Attr>:$alignment,
+                       OptionalAttr<CIR_MemOrder>:$mem_order);
 
   let assemblyFormat = [{
     (`align` `(` $alignment^ `)`)?
+    (`atomic` `(` $mem_order^ `)`)?
     $value `,` $addr attr-dict `:` type($value) `,` qualified(type($addr))
   }];
 
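With the optional `mem_order` attribute in place, the printed form of both ops
gains an `atomic(...)` clause. A sketch of the resulting textual CIR, taken
from the tests below with concrete SSA names substituted for illustration:

    %v = cir.load align(4) atomic(seq_cst) %p : !cir.ptr<!s32i>, !s32i
    cir.store align(4) atomic(release) %v, %p : !s32i, !cir.ptr<!s32i>

Because the attribute is optional, ordinary non-atomic loads and stores keep
their existing syntax.
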
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
index fead5725d183d..17fddaee871b3 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
+++ b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h
@@ -113,6 +113,18 @@ LLVM_ATTRIBUTE_UNUSED static bool isValidLinkage(GlobalLinkageKind gl) {
          isLinkOnceLinkage(gl);
 }
 
+bool operator<(cir::MemOrder, cir::MemOrder) = delete;
+bool operator>(cir::MemOrder, cir::MemOrder) = delete;
+bool operator<=(cir::MemOrder, cir::MemOrder) = delete;
+bool operator>=(cir::MemOrder, cir::MemOrder) = delete;
+
+// Validate that an integral value, which isn't known to fit within the enum's
+// range, is a valid AtomicOrderingCABI.
+template <typename Int> inline bool isValidCIRAtomicOrderingCABI(Int value) {
+  return static_cast<Int>(cir::MemOrder::Relaxed) <= value &&
+         value <= static_cast<Int>(cir::MemOrder::SequentiallyConsistent);
+}
+
 } // namespace cir
 
 #endif // CLANG_CIR_DIALECT_IR_CIROPSENUMS_H
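
A note on the deleted relational operators above: they guard against treating
the enum's numeric values as a strength order, which they are not (release has
a larger C ABI value than acquire, but is not "stronger"). A hedged sketch of
the kind of caller code this now rejects at compile time:

    // Hypothetical caller; fails to compile with the deleted operators.
    bool atLeastAcquire(cir::MemOrder order) {
      return order >= cir::MemOrder::Acquire; // error: use of deleted operator>=
    }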
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 8626ed920b678..a3c63d387ff3e 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -49,7 +49,6 @@ struct MissingFeatures {
   static bool opLoadEmitScalarRangeCheck() { return false; }
   static bool opLoadBooleanRepresentation() { return false; }
   static bool opLoadStoreTbaa() { return false; }
-  static bool opLoadStoreMemOrder() { return false; }
   static bool opLoadStoreVolatile() { return false; }
   static bool opLoadStoreAtomic() { return false; }
   static bool opLoadStoreObjC() { return false; }
@@ -163,6 +162,8 @@ struct MissingFeatures {
   static bool atomicInfoGetAtomicPointer() { return false; }
   static bool atomicInfoGetAtomicAddress() { return false; }
   static bool atomicUseLibCall() { return false; }
+  static bool atomicScope() { return false; }
+  static bool atomicSyncScopeID() { return false; }
 
   // Misc
   static bool abiArgInfo() { return false; }
diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h
index 6c927e9eda9cc..a851d06321cc1 100644
--- a/clang/lib/CIR/CodeGen/Address.h
+++ b/clang/lib/CIR/CodeGen/Address.h
@@ -68,6 +68,12 @@ class Address {
     return pointerAndKnownNonNull.getPointer() != nullptr;
   }
 
+  /// Return address with different pointer, but same element type and
+  /// alignment.
+  Address withPointer(mlir::Value newPtr) const {
+    return Address(newPtr, getElementType(), getAlignment());
+  }
+
   /// Return address with different element type, a bitcast pointer, and
   /// the same alignment.
   Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const;
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 979085f037d4f..a20b1e5be5a5b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -96,6 +96,15 @@ class AtomicInfo {
 
   bool emitMemSetZeroIfNecessary() const;
 
+  /// Cast the given pointer to an integer pointer suitable for atomic
+  /// operations on the source.
+  Address castToAtomicIntPointer(Address addr) const;
+
+  /// If addr is compatible with the iN that will be used for an atomic
+  /// operation, bitcast it. Otherwise, create a temporary that is suitable and
+  /// copy the value across.
+  Address convertToAtomicIntPointer(Address addr) const;
+
   /// Copy an atomic r-value into atomic-layout memory.
   void emitCopyIntoMemory(RValue rvalue) const;
 
@@ -111,11 +120,24 @@ class AtomicInfo {
     return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
   }
 
+  /// Creates temp alloca for intermediate operations on atomic value.
+  Address createTempAlloca() const;
+
 private:
   bool requiresMemSetZero(mlir::Type ty) const;
 };
 } // namespace
 
+// This function emits any expression (scalar, complex, or aggregate)
+// into a temporary alloca.
+static Address emitValToTemp(CIRGenFunction &cgf, Expr *e) {
+  Address declPtr = cgf.createMemTemp(
+      e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
+  cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
+                       /*Init*/ true);
+  return declPtr;
+}
+
 /// Does a store of the given IR type modify the full expected width?
 static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                            uint64_t expectedSize) {
@@ -147,6 +169,41 @@ bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
   llvm_unreachable("bad evaluation kind");
 }
 
+Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
+  mlir::Type ty = addr.getElementType();
+  uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
+  if (sourceSizeInBits != atomicSizeInBits) {
+    cgf.cgm.errorNYI(
+        loc,
+        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
+  }
+
+  return castToAtomicIntPointer(addr);
+}
+
+Address AtomicInfo::createTempAlloca() const {
+  Address tempAlloca = cgf.createMemTemp(
+      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
+                                                                  : atomicTy,
+      getAtomicAlignment(), loc, "atomic-temp");
+
+  // Cast to pointer to value type for bitfields.
+  if (lvalue.isBitField()) {
+    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
+  }
+
+  return tempAlloca;
+}
+
+Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
+  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
+  // Don't bother with int casts if the integer size is the same.
+  if (intTy && intTy.getWidth() == atomicSizeInBits)
+    return addr;
+  auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
+  return addr.withElementType(cgf.getBuilder(), ty);
+}
+
 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
   assert(lvalue.isSimple());
   Address addr = lvalue.getAddress();
@@ -187,12 +244,78 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
   }
 }
 
+static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
+                         Address ptr, Address val1, uint64_t size,
+                         cir::MemOrder order) {
+  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
+  if (scopeModel) {
+    assert(!cir::MissingFeatures::atomicScope());
+    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
+    return;
+  }
+
+  assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+  CIRGenBuilderTy &builder = cgf.getBuilder();
+  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
+  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
+
+  switch (expr->getOp()) {
+  default:
+    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
+    break;
+
+  case AtomicExpr::AO__c11_atomic_init:
+    llvm_unreachable("already handled!");
+
+  case AtomicExpr::AO__c11_atomic_load:
+  case AtomicExpr::AO__atomic_load: {
+    cir::LoadOp load =
+        builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());
+
+    assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+    load->setAttr("mem_order", orderAttr);
+
+    builder.createStore(loc, load->getResult(0), dest);
+    return;
+  }
+
+  case AtomicExpr::AO__c11_atomic_store:
+  case AtomicExpr::AO__atomic_store: {
+    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
+
+    assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
+                        /*align=*/mlir::IntegerAttr{}, orderAttr);
+    return;
+  }
+  }
+}
+
+static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
+  if (!cir::isValidCIRAtomicOrderingCABI(order))
+    return false;
+  auto memOrder = static_cast<cir::MemOrder>(order);
+  if (isStore)
+    return memOrder != cir::MemOrder::Consume &&
+           memOrder != cir::MemOrder::Acquire &&
+           memOrder != cir::MemOrder::AcquireRelease;
+  if (isLoad)
+    return memOrder != cir::MemOrder::Release &&
+           memOrder != cir::MemOrder::AcquireRelease;
+  return true;
+}
+
 RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   QualType atomicTy = e->getPtr()->getType()->getPointeeType();
   QualType memTy = atomicTy;
   if (const auto *ty = atomicTy->getAs<AtomicType>())
     memTy = ty->getValueType();
 
+  Address val1 = Address::invalid();
+  Address dest = Address::invalid();
   Address ptr = emitPointerWithAlignment(e->getPtr());
 
   assert(!cir::MissingFeatures::openCL());
@@ -202,9 +325,105 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     return RValue::get(nullptr);
   }
 
-  assert(!cir::MissingFeatures::atomicExpr());
-  cgm.errorNYI(e->getSourceRange(), "atomic expr is NYI");
-  return RValue::get(nullptr);
+  TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
+  uint64_t size = typeInfo.Width.getQuantity();
+
+  Expr::EvalResult orderConst;
+  mlir::Value order;
+  if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
+    order = emitScalarExpr(e->getOrder());
+
+  bool shouldCastToIntPtrTy = true;
+
+  switch (e->getOp()) {
+  default:
+    cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
+    return RValue::get(nullptr);
+
+  case AtomicExpr::AO__c11_atomic_init:
+    llvm_unreachable("already handled above with emitAtomicInit");
+
+  case AtomicExpr::AO__c11_atomic_load:
+    break;
+
+  case AtomicExpr::AO__atomic_load:
+    dest = emitPointerWithAlignment(e->getVal1());
+    break;
+
+  case AtomicExpr::AO__atomic_store:
+    val1 = emitPointerWithAlignment(e->getVal1());
+    break;
+
+  case AtomicExpr::AO__c11_atomic_store:
+    val1 = emitValToTemp(*this, e->getVal1());
+    break;
+  }
+
+  QualType resultTy = e->getType().getUnqualifiedType();
+
+  // The inlined atomics only function on iN types, where N is a power of 2. We
+  // need to make sure (via temporaries if necessary) that all incoming values
+  // are compatible.
+  LValue atomicValue = makeAddrLValue(ptr, atomicTy);
+  AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));
+
+  if (shouldCastToIntPtrTy) {
+    ptr = atomics.castToAtomicIntPointer(ptr);
+    if (val1.isValid())
+      val1 = atomics.convertToAtomicIntPointer(val1);
+  }
+  if (dest.isValid()) {
+    if (shouldCastToIntPtrTy)
+      dest = atomics.castToAtomicIntPointer(dest);
+  } else if (!resultTy->isVoidType()) {
+    dest = atomics.createTempAlloca();
+    if (shouldCastToIntPtrTy)
+      dest = atomics.castToAtomicIntPointer(dest);
+  }
+
+  bool powerOf2Size = (size & (size - 1)) == 0;
+  bool useLibCall = !powerOf2Size || (size > 16);
+
+  // For atomics larger than 16 bytes, emit a libcall from the frontend. This
+  // avoids the overhead of dealing with excessively-large value types in IR.
+  // Non-power-of-2 values also lower to libcall here, as they are not currently
+  // permitted in IR instructions (although that constraint could be relaxed in
+  // the future). For other cases where a libcall is required on a given
+  // platform, we let the backend handle it (this includes handling for all of
+  // the size-optimized libcall variants, which are only valid up to 16 bytes.)
+  //
+  // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
+  if (useLibCall) {
+    assert(!cir::MissingFeatures::atomicUseLibCall());
+    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
+    return RValue::get(nullptr);
+  }
+
+  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
+                 e->getOp() == AtomicExpr::AO__atomic_store;
+  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
+                e->getOp() == AtomicExpr::AO__atomic_load;
+
+  if (!order) {
+    // We have evaluated the memory order as an integer constant in orderConst.
+    // We should not ever get to a case where the ordering isn't a valid CABI
+    // value, but it's hard to enforce that in general.
+    uint64_t ord = orderConst.Val.getInt().getZExtValue();
+    if (isMemOrderValid(ord, isStore, isLoad))
+      emitAtomicOp(*this, e, dest, ptr, val1, size,
+                   static_cast<cir::MemOrder>(ord));
+  } else {
+    assert(!cir::MissingFeatures::atomicExpr());
+    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: dynamic memory order");
+    return RValue::get(nullptr);
+  }
+
+  if (resultTy->isVoidType())
+    return RValue::get(nullptr);
+
+  return convertTempToRValue(
+      dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
+      e->getExprLoc());
 }
 
 void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
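
As a concrete illustration of the libcall cutoff in emitAtomicExpr: a
hypothetical 32-byte aggregate has a power-of-2 size but exceeds the 16-byte
inline limit, so it takes the libcall path, which this patch still reports
as NYI:

    struct Big { char bytes[32]; };  // power-of-2 size, but 32 > 16
    void copyBig(struct Big *p, struct Big *out) {
      // useLibCall is true: "emitAtomicExpr: emit atomic lib call" NYI
      __atomic_load(p, out, __ATOMIC_SEQ_CST);
    }
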
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index c1088c4cd0821..20f84b75386f9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -356,15 +356,19 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
   cir::LoadOp createLoad(mlir::Location loc, Address addr,
                          bool isVolatile = false) {
     mlir::IntegerAttr align = getAlignmentAttr(addr.getAlignment());
-    return create<cir::LoadOp>(loc, addr.getPointer(), /*isDeref=*/false,
-                               align);
+    return cir::LoadOp::create(*this, loc, addr.getPointer(), /*isDeref=*/false,
+                               /*alignment=*/align,
+                               /*mem_order=*/cir::MemOrderAttr{});
   }
 
   cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst,
-                           mlir::IntegerAttr align = {}) {
+                           bool isVolatile = false,
+                           mlir::IntegerAttr align = {},
+                           cir::MemOrderAttr order = {}) {
     if (!align)
       align = getAlignmentAttr(dst.getAlignment());
-    return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), align);
+    return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), isVolatile,
+                                         align, order);
   }
 
   /// Create a cir.complex.real_ptr operation that derives a pointer to the real
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 0f1af4dcb801f..0313ab7aeaa71 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -1446,7 +1446,7 @@ class CIRGenFunction : public CIRGenTypeCache {
       mlir::OpBuilder::InsertionGuard guard(builder);
       builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
       builder.createStore(
-          value.getLoc(), value, addr,
+          value.getLoc(), value, addr, /*isVolatile=*/false,
           mlir::IntegerAttr::get(
               mlir::IntegerType::get(value.getContext(), 64),
               (uint64_t)addr.getAlignment().getAsAlign().value()));
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 9ab7178e9ab12..007eb4053d872 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1149,12 +1149,32 @@ mlir::LogicalResult CIRToLLVMFrameAddrOpLowering::matchAndRewrite(
   return mlir::success();
 }
 
+static mlir::LLVM::AtomicOrdering
+getLLVMMemOrder(std::optional<cir::MemOrder> memorder) {
+  if (!memorder)
+    return mlir::LLVM::AtomicOrdering::not_atomic;
+  switch (*memorder) {
+  case cir::MemOrder::Relaxed:
+    return mlir::LLVM::AtomicOrdering::monotonic;
+  case cir::MemOrder::Consume:
+  case cir::MemOrder::Acquire:
+    return mlir::LLVM::AtomicOrdering::acquire;
+  case cir::MemOrder::Release:
+    return mlir::LLVM::AtomicOrdering::release;
+  case cir::MemOrder::AcquireRelease:
+    return mlir::LLVM::AtomicOrdering::acq_rel;
+  case cir::MemOrder::SequentiallyConsistent:
+    return mlir::LLVM::AtomicOrdering::seq_cst;
+  }
+  llvm_unreachable("unknown memory order");
+}
+
 mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
     cir::LoadOp op, OpAdaptor adaptor,
     mlir::ConversionPatternRewriter &rewriter) const {
   const mlir::Type llvmTy =
       convertTypeForMemory(*getTypeConverter(), dataLayout, op.getType());
-  assert(!cir::MissingFeatures::opLoadStoreMemOrder());
+  mlir::LLVM::AtomicOrdering ordering = getLLVMMemOrder(op.getMemOrder());
   std::optional<size_t> opAlign = op.getAlignment();
   unsigned alignment =
       (unsigned)opAlign.value_or(dataLayout.getTypeABIAlignment(llvmTy));
@@ -1163,11 +1184,10 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
 
   // TODO: nontemporal, syncscope.
   assert(!cir::MissingFeatures::opLoadStoreVolatile());
-  mlir::LLVM::LoadOp newLoad = rewriter.create<mlir::LLVM::LoadOp>(
-      op->getLoc(), llvmTy, adaptor.getAddr(), alignment,
-      /*volatile=*/false, /*nontemporal=*/false,
-      /*invariant=*/false, /*invariantGroup=*/false,
-      mlir::LLVM::AtomicOrdering::not_atomic);
+  mlir::LLVM::LoadOp newLoad = mlir::LLVM::LoadOp::create(
+      rewriter, op->getLoc(), llvmTy, adaptor.getAddr(), alignment,
+      /*isVolatile=*/false, /*isNonTemporal=*/false,
+      /*isInvariant=*/false, /*isInvariantGroup=*/false, ordering);
 
   // Convert adapted result to its original type if needed.
   mlir::Value result =
@@ -1180,7 +1200,7 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
 mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite(
     cir::StoreOp op, OpAdaptor adaptor,
     mlir::ConversionPatternRewriter &rewriter) const {
-  assert(!cir::MissingFeatures::opLoadStoreMemOrder());
+  mlir::LLVM::AtomicOrdering memorder = getLLVMMemOrder(op.getMemOrder());
   const mlir::Type llvmTy =
       getTypeConverter()->convertType(op.getValue().getType());
   std::optional<size_t> opAlign = op.getAlignment();
@@ -1194,10 +1214,10 @@ mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite(
                                    op.getValue().getType(), adaptor.getValue());
   // TODO: nontemporal, syncscope.
   assert(!cir::MissingFeatures::opLoadStoreVolatile());
-  mlir::LLVM::StoreOp storeOp = rewriter.create<mlir::LLVM::StoreOp>(
-      op->getLoc(), value, adaptor.getAddr(), alignment, /*volatile=*/false,
-      /*nontemporal=*/false, /*invariantGroup=*/false,
-      mlir::LLVM::AtomicOrdering::not_atomic);
+  mlir::LLVM::StoreOp storeOp = mlir::LLVM::StoreOp::create(
+      rewriter, op->getLoc(), value, adaptor.getAddr(), alignment,
+      /*isVolatile=*/false,
+      /*isNonTemporal=*/false, /*isInvariantGroup=*/false, memorder);
   rewriter.replaceOp(op, storeOp);
   assert(!cir::MissingFeatures::opLoadStoreTbaa());
   return mlir::LogicalResult::success();
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 8db4ae43d7389..6c37fb7cd432d 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -45,3 +45,109 @@ void f2(void) {
 // OGCG:         %[[SLOT:.+]] = alloca i32, align 4
 // OGCG-NEXT:    store i32 42, ptr %[[SLOT]], align 4
 // OGCG:       }
+
+void load(int *ptr) {
+  int x;
+  __atomic_load(ptr, &x, __ATOMIC_RELAXED);
+  __atomic_load(ptr, &x, __ATOMIC_CONSUME);
+  __atomic_load(ptr, &x, __ATOMIC_ACQUIRE);
+  __atomic_load(ptr, &x, __ATOMIC_SEQ_CST);
+}
+
+// CIR-LABEL: @load
+// CIR:   %{{.+}} = cir.load align(4) atomic(relaxed) %{{.+}} : 
!cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) atomic(consume) %{{.+}} : 
!cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) atomic(acquire) %{{.+}} : 
!cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) atomic(seq_cst) %{{.+}} : 
!cir.ptr<!s32i>, !s32i
+// CIR: }
+
+// LLVM-LABEL: @load
+// LLVM:   %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+// LLVM:   %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// LLVM:   %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// LLVM:   %{{.+}} = load atomic i32, ptr %{{.+}} seq_cst, align 4
+// LLVM: }
+
+// OGCG-LABEL: @load
+// OGCG:   %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+// OGCG:   %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// OGCG:   %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// OGCG:   %{{.+}} = load atomic i32, ptr %{{.+}} seq_cst, align 4
+// OGCG: }
+
+void c11_load(_Atomic(int) *ptr) {
+  __c11_atomic_load(ptr, __ATOMIC_RELAXED);
+  __c11_atomic_load(ptr, __ATOMIC_CONSUME);
+  __c11_atomic_load(ptr, __ATOMIC_ACQUIRE);
+  __c11_atomic_load(ptr, __ATOMIC_SEQ_CST);
+}
+
+// CIR-LABEL: @c11_load
+// CIR:   %{{.+}} = cir.load align(4) atomic(relaxed) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) atomic(consume) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) atomic(acquire) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR:   %{{.+}} = cir.load align(4) atomic(seq_cst) %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR: }
+
+// LLVM-LABEL: @c11_load
+// LLVM:   %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+// LLVM:   %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// LLVM:   %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// LLVM:   %{{.+}} = load atomic i32, ptr %{{.+}} seq_cst, align 4
+// LLVM: }
+
+// OGCG-LABEL: @c11_load
+// OGCG:   %{{.+}} = load atomic i32, ptr %{{.+}} monotonic, align 4
+// OGCG:   %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// OGCG:   %{{.+}} = load atomic i32, ptr %{{.+}} acquire, align 4
+// OGCG:   %{{.+}} = load atomic i32, ptr %{{.+}} seq_cst, align 4
+// OGCG: }
+
+void store(int *ptr, int x) {
+  __atomic_store(ptr, &x, __ATOMIC_RELAXED);
+  __atomic_store(ptr, &x, __ATOMIC_RELEASE);
+  __atomic_store(ptr, &x, __ATOMIC_SEQ_CST);
+}
+
+// CIR-LABEL: @store
+// CIR:   cir.store align(4) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR:   cir.store align(4) atomic(release) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR:   cir.store align(4) atomic(seq_cst) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+// LLVM-LABEL: @store
+// LLVM:   store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+// LLVM:   store atomic i32 %{{.+}}, ptr %{{.+}} release, align 4
+// LLVM:   store atomic i32 %{{.+}}, ptr %{{.+}} seq_cst, align 4
+// LLVM: }
+
+// OGCG-LABEL: @store
+// OGCG:   store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+// OGCG:   store atomic i32 %{{.+}}, ptr %{{.+}} release, align 4
+// OGCG:   store atomic i32 %{{.+}}, ptr %{{.+}} seq_cst, align 4
+// OGCG: }
+
+void c11_store(_Atomic(int) *ptr, int x) {
+  __c11_atomic_store(ptr, x, __ATOMIC_RELAXED);
+  __c11_atomic_store(ptr, x, __ATOMIC_RELEASE);
+  __c11_atomic_store(ptr, x, __ATOMIC_SEQ_CST);
+}
+
+// CIR-LABEL: @c11_store
+// CIR:   cir.store align(4) atomic(relaxed) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR:   cir.store align(4) atomic(release) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR:   cir.store align(4) atomic(seq_cst) %{{.+}}, %{{.+}} : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+// LLVM-LABEL: @c11_store
+// LLVM:   store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+// LLVM:   store atomic i32 %{{.+}}, ptr %{{.+}} release, align 4
+// LLVM:   store atomic i32 %{{.+}}, ptr %{{.+}} seq_cst, align 4
+// LLVM: }
+
+// OGCG-LABEL: @c11_store
+// OGCG:   store atomic i32 %{{.+}}, ptr %{{.+}} monotonic, align 4
+// OGCG:   store atomic i32 %{{.+}}, ptr %{{.+}} release, align 4
+// OGCG:   store atomic i32 %{{.+}}, ptr %{{.+}} seq_cst, align 4
+// OGCG: }
+
