https://github.com/Lancern updated https://github.com/llvm/llvm-project/pull/158089

>From 78170c9ba16b6d33c0258290fad201915f2ab365 Mon Sep 17 00:00:00 2001
From: Sirui Mu <msrlanc...@gmail.com>
Date: Thu, 11 Sep 2025 22:48:33 +0800
Subject: [PATCH] [CIR] Add atomic exchange operation

This patch adds the atomic exchange operation, which covers the following C/C++
builtin functions (a short illustrative usage sketch follows the list):

  - `__c11_atomic_exchange`
  - `__atomic_exchange`
  - `__atomic_exchange_n`
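
For reference, a minimal C sketch of how these builtins are typically called
(illustrative only, not part of the patch; names such as `prev` and `ret` are
hypothetical):

```c
int builtin_exchange_sketch(_Atomic(int) *c11_obj, int *plain_obj, int desired) {
  // __c11_atomic_exchange operates on an _Atomic object and returns the old value.
  int prev = __c11_atomic_exchange(c11_obj, desired, __ATOMIC_SEQ_CST);

  // __atomic_exchange_n is the value-based form for a plain object.
  prev += __atomic_exchange_n(plain_obj, desired, __ATOMIC_ACQ_REL);

  // __atomic_exchange is the pointer-based form; the old value is written to *ret.
  int ret;
  __atomic_exchange(plain_obj, &desired, &ret, __ATOMIC_RELAXED);
  return prev + ret;
}
```

All three forms are lowered to the new `cir.atomic.xchg` operation in CIR and,
as the tests below check, to an `atomicrmw xchg` instruction in LLVM IR.
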
---
 clang/include/clang/CIR/Dialect/IR/CIROps.td  | 39 ++++++++
 clang/lib/CIR/CodeGen/CIRGenAtomic.cpp        | 34 ++++++-
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 12 +++
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h   | 10 ++
 clang/test/CIR/CodeGen/atomic.c               | 99 +++++++++++++++++++
 clang/test/CIR/IR/atomic.cir                  | 21 ++++
 6 files changed, 211 insertions(+), 4 deletions(-)
 create mode 100644 clang/test/CIR/IR/atomic.cir

diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 38c4a87f69d6d..3d2479dfa4a4b 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -4053,6 +4053,45 @@ def CIR_ThrowOp : CIR_Op<"throw"> {
 // Atomic operations
 //===----------------------------------------------------------------------===//
 
+def CIR_AtomicXchg : CIR_Op<"atomic.xchg", [
+  AllTypesMatch<["result", "val"]>,
+  TypesMatchWith<"type of 'val' must match the pointee type of 'ptr'",
+    "ptr", "val", "mlir::cast<cir::PointerType>($_self).getPointee()">
+]> {
+  let summary = "Atomic exchange";
+  let description = [{
+    C/C++ atomic exchange operation. This operation implements the C/C++
+    builtin functions `__atomic_exchange`, `__atomic_exchange_n`, and
+    `__c11_atomic_exchange`.
+
+    This operation takes two arguments: a pointer `ptr` and a value `val`. The
+    operation atomically replaces the value of the object pointed-to by `ptr`
+    with `val`, and returns the original value of the object.
+
+    Example:
+
+    ```mlir
+    %res = cir.atomic.xchg seq_cst %ptr, %val
+               : !cir.ptr<!u64i> -> !u64i
+    // %res holds the value previously stored at %ptr
+    ```
+  }];
+
+  let results = (outs CIR_AnyType:$result);
+  let arguments = (ins
+    Arg<CIR_PointerType, "", [MemRead, MemWrite]>:$ptr,
+    CIR_AnyType:$val,
+    Arg<CIR_MemOrder, "memory order">:$mem_order,
+    UnitAttr:$is_volatile
+  );
+
+  let assemblyFormat = [{
+    $mem_order (`volatile` $is_volatile^)?
+    $ptr `,` $val
+    `:` qualified(type($ptr)) `->` type($result) attr-dict
+  }];
+}
+
 def CIR_AtomicCmpXchg : CIR_Op<"atomic.cmpxchg", [
   AllTypesMatch<["old", "expected", "desired"]>
 ]> {
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 86ba0299af3cf..e943b0252bf4e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -341,6 +341,7 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   }
 
   assert(!cir::MissingFeatures::atomicSyncScopeID());
+  llvm::StringRef opName;
 
   CIRGenBuilderTy &builder = cgf.getBuilder();
   mlir::Location loc = cgf.getLoc(expr->getSourceRange());
@@ -400,6 +401,12 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
     return;
   }
 
+  case AtomicExpr::AO__c11_atomic_exchange:
+  case AtomicExpr::AO__atomic_exchange_n:
+  case AtomicExpr::AO__atomic_exchange:
+    opName = cir::AtomicXchg::getOperationName();
+    break;
+
   case AtomicExpr::AO__opencl_atomic_init:
 
   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
@@ -421,11 +428,8 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   case AtomicExpr::AO__scoped_atomic_store:
   case AtomicExpr::AO__scoped_atomic_store_n:
 
-  case AtomicExpr::AO__c11_atomic_exchange:
   case AtomicExpr::AO__hip_atomic_exchange:
   case AtomicExpr::AO__opencl_atomic_exchange:
-  case AtomicExpr::AO__atomic_exchange_n:
-  case AtomicExpr::AO__atomic_exchange:
   case AtomicExpr::AO__scoped_atomic_exchange_n:
   case AtomicExpr::AO__scoped_atomic_exchange:
 
@@ -503,8 +507,23 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__atomic_clear:
     cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
-    break;
+    return;
   }
+
+  assert(!opName.empty() && "expected operation name to build");
+  mlir::Value loadVal1 = builder.createLoad(loc, val1);
+
+  SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
+  SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
+  mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
+                                          atomicOperands, atomicResTys);
+
+  rmwOp->setAttr("mem_order", orderAttr);
+  if (expr->isVolatile())
+    rmwOp->setAttr("is_volatile", builder.getUnitAttr());
+
+  mlir::Value result = rmwOp->getResult(0);
+  builder.createStore(loc, result, dest);
 }
 
 static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
@@ -572,6 +591,11 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     val1 = emitPointerWithAlignment(e->getVal1());
     break;
 
+  case AtomicExpr::AO__atomic_exchange:
+    val1 = emitPointerWithAlignment(e->getVal1());
+    dest = emitPointerWithAlignment(e->getVal2());
+    break;
+
   case AtomicExpr::AO__atomic_compare_exchange:
   case AtomicExpr::AO__atomic_compare_exchange_n:
   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
@@ -590,7 +614,9 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
       isWeakExpr = e->getWeak();
     break;
 
+  case AtomicExpr::AO__atomic_exchange_n:
   case AtomicExpr::AO__atomic_store_n:
+  case AtomicExpr::AO__c11_atomic_exchange:
   case AtomicExpr::AO__c11_atomic_store:
     val1 = emitValToTemp(*this, e->getVal1());
     break;
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 1d7e3df1430ac..f6543076d537a 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -701,6 +701,17 @@ mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite(
   return mlir::success();
 }
 
+mlir::LogicalResult CIRToLLVMAtomicXchgLowering::matchAndRewrite(
+    cir::AtomicXchg op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  assert(!cir::MissingFeatures::atomicSyncScopeID());
+  mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(adaptor.getMemOrder());
+  rewriter.replaceOpWithNewOp<mlir::LLVM::AtomicRMWOp>(
+      op, mlir::LLVM::AtomicBinOp::xchg, adaptor.getPtr(), adaptor.getVal(),
+      llvmOrder);
+  return mlir::success();
+}
+
 mlir::LogicalResult CIRToLLVMBitClrsbOpLowering::matchAndRewrite(
     cir::BitClrsbOp op, OpAdaptor adaptor,
     mlir::ConversionPatternRewriter &rewriter) const {
@@ -2483,6 +2494,7 @@ void ConvertCIRToLLVMPass::runOnOperation() {
                CIRToLLVMAssumeAlignedOpLowering,
                CIRToLLVMAssumeSepStorageOpLowering,
                CIRToLLVMAtomicCmpXchgLowering,
+               CIRToLLVMAtomicXchgLowering,
                CIRToLLVMBaseClassAddrOpLowering,
                CIRToLLVMATanOpLowering,
                CIRToLLVMBinOpLowering,
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index 09ff7a0901c69..76016861b1837 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -144,6 +144,16 @@ class CIRToLLVMAtomicCmpXchgLowering
                   mlir::ConversionPatternRewriter &) const override;
 };
 
+class CIRToLLVMAtomicXchgLowering
+    : public mlir::OpConversionPattern<cir::AtomicXchg> {
+public:
+  using mlir::OpConversionPattern<cir::AtomicXchg>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(cir::AtomicXchg op, OpAdaptor,
+                  mlir::ConversionPatternRewriter &) const override;
+};
+
 class CIRToLLVMBrCondOpLowering
     : public mlir::OpConversionPattern<cir::BrCondOp> {
 public:
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 0eba2959c0ebc..76289c597a2b5 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -415,3 +415,102 @@ void atomic_cmpxchg_n(int *ptr, int *expected, int desired) {
   // OGCG-NEXT:    %[[SUCCESS_2:.+]] = zext i1 %[[SUCCESS]] to i8
   // OGCG-NEXT:    store i8 %[[SUCCESS_2]], ptr %{{.+}}, align 1
 }
+
+void c11_atomic_exchange(_Atomic(int) *ptr, int value) {
+  // CIR-LABEL: @c11_atomic_exchange
+  // LLVM-LABEL: @c11_atomic_exchange
+  // OGCG-LABEL: @c11_atomic_exchange
+
+  __c11_atomic_exchange(ptr, value, __ATOMIC_RELAXED);
+  __c11_atomic_exchange(ptr, value, __ATOMIC_CONSUME);
+  __c11_atomic_exchange(ptr, value, __ATOMIC_ACQUIRE);
+  __c11_atomic_exchange(ptr, value, __ATOMIC_RELEASE);
+  __c11_atomic_exchange(ptr, value, __ATOMIC_ACQ_REL);
+  __c11_atomic_exchange(ptr, value, __ATOMIC_SEQ_CST);
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg consume %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg release %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg seq_cst %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} release, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} release, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+}
+
+void atomic_exchange(int *ptr, int *value, int *old) {
+  // CIR-LABEL: @atomic_exchange
+  // LLVM-LABEL: @atomic_exchange
+  // OGCG-LABEL: @atomic_exchange
+
+  __atomic_exchange(ptr, value, old, __ATOMIC_RELAXED);
+  __atomic_exchange(ptr, value, old, __ATOMIC_CONSUME);
+  __atomic_exchange(ptr, value, old, __ATOMIC_ACQUIRE);
+  __atomic_exchange(ptr, value, old, __ATOMIC_RELEASE);
+  __atomic_exchange(ptr, value, old, __ATOMIC_ACQ_REL);
+  __atomic_exchange(ptr, value, old, __ATOMIC_SEQ_CST);
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg consume %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg release %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg seq_cst %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} release, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} release, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+}
+
+void atomic_exchange_n(int *ptr, int value) {
+  // CIR-LABEL: @atomic_exchange_n
+  // LLVM-LABEL: @atomic_exchange_n
+  // OGCG-LABEL: @atomic_exchange_n
+
+  __atomic_exchange_n(ptr, value, __ATOMIC_RELAXED);
+  __atomic_exchange_n(ptr, value, __ATOMIC_CONSUME);
+  __atomic_exchange_n(ptr, value, __ATOMIC_ACQUIRE);
+  __atomic_exchange_n(ptr, value, __ATOMIC_RELEASE);
+  __atomic_exchange_n(ptr, value, __ATOMIC_ACQ_REL);
+  __atomic_exchange_n(ptr, value, __ATOMIC_SEQ_CST);
+  // CIR: %{{.+}} = cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg consume %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acquire %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg release %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  // CIR: %{{.+}} = cir.atomic.xchg seq_cst %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} release, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
+  // LLVM: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} monotonic, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acquire, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} release, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
+  // OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+}
diff --git a/clang/test/CIR/IR/atomic.cir b/clang/test/CIR/IR/atomic.cir
new file mode 100644
index 0000000000000..6ca5af2aac175
--- /dev/null
+++ b/clang/test/CIR/IR/atomic.cir
@@ -0,0 +1,21 @@
+// RUN: cir-opt %s | FileCheck %s
+
+!s32i = !cir.int<s, 32>
+!u32i = !cir.int<u, 32>
+
+cir.func @atomic_xchg(%ptr: !cir.ptr<!s32i>, %val: !s32i) {
+  // CHECK-LABEL: @atomic_xchg
+  %0 = cir.atomic.xchg relaxed %ptr, %val : !cir.ptr<!s32i> -> !s32i
+  // CHECK: cir.atomic.xchg relaxed %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  %1 = cir.atomic.xchg consume %ptr, %val : !cir.ptr<!s32i> -> !s32i
+  // CHECK: cir.atomic.xchg consume %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  %2 = cir.atomic.xchg acquire %ptr, %val : !cir.ptr<!s32i> -> !s32i
+  // CHECK: cir.atomic.xchg acquire %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  %3 = cir.atomic.xchg release %ptr, %val : !cir.ptr<!s32i> -> !s32i
+  // CHECK: cir.atomic.xchg release %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  %4 = cir.atomic.xchg acq_rel %ptr, %val : !cir.ptr<!s32i> -> !s32i
+  // CHECK: cir.atomic.xchg acq_rel %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  %5 = cir.atomic.xchg seq_cst %ptr, %val : !cir.ptr<!s32i> -> !s32i
+  // CHECK: cir.atomic.xchg seq_cst %{{.+}}, %{{.+}} : !cir.ptr<!s32i> -> !s32i
+  cir.return
+}

_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits