https://github.com/andykaylor updated 
https://github.com/llvm/llvm-project/pull/184707

>From f4b69104ab1572b56aba15d5e2f0f93ad7577984 Mon Sep 17 00:00:00 2001
From: Andy Kaylor <[email protected]>
Date: Tue, 25 Nov 2025 14:11:50 -0800
Subject: [PATCH 1/6] [CIR] Add support for delete cleanup after new operators

This adds support for calling operator delete when an exception is thrown
during initialization following an operator new call.

This does not yet handle the case where a temporary object is materialized
during the object initialization. That case is marked by the
"setupCleanupBlockActivation" diagnostic in deactivateCleanupBlock and
will be implemented in a future change.
---
 clang/include/clang/CIR/MissingFeatures.h |   2 +-
 clang/lib/CIR/CodeGen/CIRGenCall.cpp      |  10 +
 clang/lib/CIR/CodeGen/CIRGenCall.h        |   6 +-
 clang/lib/CIR/CodeGen/CIRGenCleanup.cpp   |  23 +-
 clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp   | 295 +++++++++++++++++-----
 clang/lib/CIR/CodeGen/CIRGenFunction.h    |  10 +
 clang/lib/CIR/CodeGen/EHScopeStack.h      |  19 ++
 clang/lib/CIR/Dialect/IR/CIRDialect.cpp   |  11 +-
 clang/test/CIR/CodeGen/new-delete.cpp     | 164 ++++++++++++
 9 files changed, 474 insertions(+), 66 deletions(-)
 create mode 100644 clang/test/CIR/CodeGen/new-delete.cpp

diff --git a/clang/include/clang/CIR/MissingFeatures.h 
b/clang/include/clang/CIR/MissingFeatures.h
index d206503d914f5..1e3a2c9af35d1 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -291,7 +291,6 @@ struct MissingFeatures {
   static bool handleBuiltinICEArguments() { return false; }
   static bool hip() { return false; }
   static bool incrementProfileCounter() { return false; }
-  static bool innermostEHScope() { return false; }
   static bool insertBuiltinUnpredictable() { return false; }
   static bool instrumentation() { return false; }
   static bool intrinsicElementTypeSupport() { return false; }
@@ -348,6 +347,7 @@ struct MissingFeatures {
   static bool targetCodeGenInfoGetNullPointer() { return false; }
   static bool thunks() { return false; }
   static bool tryEmitAsConstant() { return false; }
+  static bool typeAwareAllocation() { return false; }
   static bool typeChecks() { return false; }
   static bool useEHCleanupForArray() { return false; }
   static bool vaArgABILowering() { return false; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp 
b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
index c92296352db4e..49cde849e203d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -1021,6 +1021,16 @@ CIRGenTypes::arrangeFunctionDeclaration(const 
FunctionDecl *fd) {
   return arrangeFreeFunctionType(funcTy.castAs<FunctionProtoType>());
 }
 
+RValue CallArg::getRValue(CIRGenFunction &cgf, mlir::Location loc) const {
+  if (!hasLV)
+    return rv;
+  LValue copy = cgf.makeAddrLValue(cgf.createMemTemp(ty, loc), ty);
+  cgf.emitAggregateCopy(copy, lv, ty, AggValueSlot::DoesNotOverlap,
+                        lv.isVolatile());
+  isUsed = true;
+  return RValue::getAggregate(copy.getAddress());
+}
+
 static cir::CIRCallOpInterface
 emitCallLikeOp(CIRGenFunction &cgf, mlir::Location callLoc,
                cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal,
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h 
b/clang/lib/CIR/CodeGen/CIRGenCall.h
index 347bd4a7c8266..b30b4969ca45e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.h
@@ -202,7 +202,7 @@ struct CallArg {
 
   /// A data-flow flag to make sure getRValue and/or copyInto are not
   /// called twice for duplicated IR emission.
-  [[maybe_unused]] mutable bool isUsed;
+  mutable bool isUsed;
 
 public:
   clang::QualType ty;
@@ -215,6 +215,10 @@ struct CallArg {
 
   bool hasLValue() const { return hasLV; }
 
+  /// \returns an independent RValue. If the CallArg contains an LValue,
+  /// a temporary copy is returned.
+  RValue getRValue(CIRGenFunction &cgf, mlir::Location loc) const;
+
   LValue getKnownLValue() const {
     assert(hasLV && !isUsed);
     return lv;
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp 
b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index bdb2947200f23..cbed8452810c5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -95,7 +95,6 @@ void *EHScopeStack::pushCleanup(CleanupKind kind, size_t 
size) {
   bool isLifetimeMarker = kind & LifetimeMarker;
   bool skipCleanupScope = false;
 
-  assert(!cir::MissingFeatures::innermostEHScope());
   cir::CleanupKind cleanupKind = cir::CleanupKind::All;
   if (isEHCleanup && cgf->getLangOpts().Exceptions) {
     cleanupKind =
@@ -193,6 +192,25 @@ bool EHScopeStack::requiresCatchOrCleanup() const {
   return false;
 }
 
+/// Deactivate a cleanup that was created in an active state.
+void CIRGenFunction::deactivateCleanupBlock(EHScopeStack::stable_iterator c,
+                                            mlir::Operation *dominatingIP) {
+  assert(c != ehStack.stable_end() && "deactivating bottom of stack?");
+  EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.find(c));
+  assert(scope.isActive() && "double deactivation");
+
+  // If it's the top of the stack, just pop it, but do so only if it belongs
+  // to the current RunCleanupsScope.
+  if (c == ehStack.stable_begin() &&
+      currentCleanupStackDepth.strictlyEncloses(c)) {
+    popCleanupBlock();
+    return;
+  }
+
+  // Otherwise, follow the general case.
+  cgm.errorNYI("deactivateCleanupBlock: setupCleanupBlockActivation");
+}
+
 static void emitCleanup(CIRGenFunction &cgf, cir::CleanupScopeOp cleanupScope,
                         EHScopeStack::Cleanup *cleanup,
                         EHScopeStack::Cleanup::Flags flags) {
@@ -245,10 +263,11 @@ void CIRGenFunction::popCleanupBlock() {
   bool hasFallthrough = fallthroughSource != nullptr && isActive;
 
   bool requiresNormalCleanup = scope.isNormalCleanup() && hasFallthrough;
+  bool requiresEHCleanup = scope.isEHCleanup() && hasFallthrough;
 
   // If we don't need the cleanup at all, we're done.
   assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
-  if (!requiresNormalCleanup) {
+  if (!requiresNormalCleanup && !requiresEHCleanup) {
     ehStack.popCleanup();
     return;
   }
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 56a7539a841d1..9fddc0eb6ebcf 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -647,6 +647,198 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction 
&cgf, const CXXNewExpr *e,
   return size;
 }
 
+/// Emit a call to an operator new or operator delete function, as implicitly
+/// created by new-expressions and delete-expressions.
+static RValue emitNewDeleteCall(CIRGenFunction &cgf,
+                                const FunctionDecl *calleeDecl,
+                                const FunctionProtoType *calleeType,
+                                const CallArgList &args) {
+  cir::CIRCallOpInterface callOrTryCall;
+  cir::FuncOp calleePtr = cgf.cgm.getAddrOfFunction(calleeDecl);
+  CIRGenCallee callee =
+      CIRGenCallee::forDirect(calleePtr, GlobalDecl(calleeDecl));
+  RValue rv =
+      cgf.emitCall(cgf.cgm.getTypes().arrangeFreeFunctionCall(args, 
calleeType),
+                   callee, ReturnValueSlot(), args, &callOrTryCall);
+
+  /// C++1y [expr.new]p10:
+  ///   [In a new-expression,] an implementation is allowed to omit a call
+  ///   to a replaceable global allocation function.
+  ///
+  /// We model such elidable calls with the 'builtin' attribute.
+  assert(!cir::MissingFeatures::attributeBuiltin());
+  return rv;
+}
+
+RValue CIRGenFunction::emitNewOrDeleteBuiltinCall(const FunctionProtoType 
*type,
+                                                  const CallExpr *callExpr,
+                                                  OverloadedOperatorKind op) {
+  CallArgList args;
+  emitCallArgs(args, type, callExpr->arguments());
+  // Find the allocation or deallocation function that we're calling.
+  ASTContext &astContext = getContext();
+  assert(op == OO_New || op == OO_Delete);
+  DeclarationName name = astContext.DeclarationNames.getCXXOperatorName(op);
+
+  clang::DeclContextLookupResult lookupResult =
+      astContext.getTranslationUnitDecl()->lookup(name);
+  for (const auto *decl : lookupResult) {
+    if (const auto *funcDecl = dyn_cast<FunctionDecl>(decl)) {
+      if (astContext.hasSameType(funcDecl->getType(), QualType(type, 0))) {
+        if (sanOpts.has(SanitizerKind::AllocToken)) {
+          // TODO: Set !alloc_token metadata.
+          assert(!cir::MissingFeatures::allocToken());
+          cgm.errorNYI("Alloc token sanitizer not yet supported!");
+        }
+
+        // Emit the call to operator new/delete.
+        return emitNewDeleteCall(*this, funcDecl, type, args);
+      }
+    }
+  }
+
+  llvm_unreachable("predeclared global operator new/delete is missing");
+}
+
+namespace {
+/// A cleanup to call the given 'operator delete' function upon abnormal
+/// exit from a new expression. Templated on a traits type that deals with
+/// ensuring that the arguments dominate the cleanup if necessary.
+template <typename Traits>
+class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
+  /// Type used to hold llvm::Value*s.
+  typedef typename Traits::ValueTy ValueTy;
+  /// Type used to hold RValues.
+  typedef typename Traits::RValueTy RValueTy;
+  struct PlacementArg {
+    RValueTy argValue;
+    QualType argType;
+  };
+
+  unsigned numPlacementArgs : 30;
+  LLVM_PREFERRED_TYPE(AlignedAllocationMode)
+  unsigned passAlignmentToPlacementDelete : 1;
+  const FunctionDecl *operatorDelete;
+  ValueTy ptr;
+  ValueTy allocSize;
+  CharUnits allocAlign;
+
+  PlacementArg *getPlacementArgs() {
+    return reinterpret_cast<PlacementArg *>(this + 1);
+  }
+
+public:
+  static size_t getExtraSize(size_t numPlacementArgs) {
+    return numPlacementArgs * sizeof(PlacementArg);
+  }
+
+  CallDeleteDuringNew(size_t numPlacementArgs,
+                      const FunctionDecl *operatorDelete, ValueTy ptr,
+                      ValueTy allocSize,
+                      const ImplicitAllocationParameters &iap,
+                      CharUnits allocAlign)
+      : numPlacementArgs(numPlacementArgs),
+        passAlignmentToPlacementDelete(isAlignedAllocation(iap.PassAlignment)),
+        operatorDelete(operatorDelete), ptr(ptr), allocSize(allocSize),
+        allocAlign(allocAlign) {}
+
+  void setPlacementArg(unsigned i, RValueTy argValue, QualType argType) {
+    assert(i < numPlacementArgs && "index out of range");
+    getPlacementArgs()[i] = {argValue, argType};
+  }
+
+  void emit(CIRGenFunction &cgf, Flags flags) override {
+    const auto *fpt = operatorDelete->getType()->castAs<FunctionProtoType>();
+    CallArgList deleteArgs;
+
+    unsigned firstNonTypeArg = 0;
+    TypeAwareAllocationMode typeAwareDeallocation = 
TypeAwareAllocationMode::No;
+    assert(!cir::MissingFeatures::typeAwareAllocation());
+
+    // The first argument after type-identity parameter (if any) is always
+    // a void* (or C* for a destroying operator delete for class type C).
+    deleteArgs.add(Traits::get(cgf, ptr), fpt->getParamType(firstNonTypeArg));
+
+    // Figure out what other parameters we should be implicitly passing.
+    UsualDeleteParams params;
+    if (numPlacementArgs) {
+      // A placement deallocation function is implicitly passed an alignment
+      // if the placement allocation function was, but is never passed a size.
+      params.Alignment =
+          alignedAllocationModeFromBool(passAlignmentToPlacementDelete);
+      params.TypeAwareDelete = typeAwareDeallocation;
+      params.Size = isTypeAwareAllocation(params.TypeAwareDelete);
+    } else {
+      // For a non-placement new-expression, 'operator delete' can take a
+      // size and/or an alignment if it has the right parameters.
+      params = operatorDelete->getUsualDeleteParams();
+    }
+
+    assert(!params.DestroyingDelete &&
+           "should not call destroying delete in a new-expression");
+
+    // The second argument can be a std::size_t (for non-placement delete).
+    if (params.Size)
+      deleteArgs.add(Traits::get(cgf, allocSize),
+                     cgf.getContext().getSizeType());
+
+    // The next (second or third) argument can be a std::align_val_t, which
+    // is an enum whose underlying type is std::size_t.
+    // FIXME: Use the right type as the parameter type. Note that in a call
+    // to operator delete(size_t, ...), we may not have it available.
+    if (isAlignedAllocation(params.Alignment))
+      cgf.cgm.errorNYI("CallDeleteDuringNew: aligned allocation");
+
+    // Pass the rest of the arguments, which must match exactly.
+    for (unsigned i = 0; i != numPlacementArgs; ++i) {
+      auto arg = getPlacementArgs()[i];
+      deleteArgs.add(Traits::get(cgf, arg.argValue), arg.argType);
+    }
+
+    // Call 'operator delete'.
+    emitNewDeleteCall(cgf, operatorDelete, fpt, deleteArgs);
+  }
+};
+} // namespace
+
+/// Enter a cleanup to call 'operator delete' if the initializer in a
+/// new-expression throws.
+static void enterNewDeleteCleanup(CIRGenFunction &cgf, const CXXNewExpr *e,
+                                  Address newPtr, mlir::Value allocSize,
+                                  CharUnits allocAlign,
+                                  const CallArgList &newArgs) {
+  unsigned numNonPlacementArgs = e->getNumImplicitArgs();
+
+  // If we're not inside a conditional branch, then the cleanup will
+  // dominate and we can do the easier (and more efficient) thing.
+  if (!cgf.isInConditionalBranch()) {
+    struct DirectCleanupTraits {
+      typedef mlir::Value ValueTy;
+      typedef RValue RValueTy;
+      static RValue get(CIRGenFunction &, ValueTy v) { return RValue::get(v); }
+      static RValue get(CIRGenFunction &, RValueTy v) { return v; }
+    };
+
+    typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
+
+    assert(!cir::MissingFeatures::typeAwareAllocation());
+    DirectCleanup *cleanup = cgf.ehStack.pushCleanupWithExtra<DirectCleanup>(
+        EHCleanup, e->getNumPlacementArgs(), e->getOperatorDelete(),
+        newPtr.getPointer(), allocSize, e->implicitAllocationParameters(),
+        allocAlign);
+    for (unsigned i = 0, n = e->getNumPlacementArgs(); i != n; ++i) {
+      const CallArg &arg = newArgs[i + numNonPlacementArgs];
+      cleanup->setPlacementArg(
+          i, arg.getRValue(cgf, cgf.getLoc(e->getSourceRange())), arg.ty);
+    }
+
+    return;
+  }
+
+  cgf.cgm.errorNYI(e->getSourceRange(),
+                   "enterNewDeleteCleanup: conditional branch");
+}
+
 static void storeAnyExprIntoOneUnit(CIRGenFunction &cgf, const Expr *init,
                                     QualType allocType, Address newPtr,
                                     AggValueSlot::Overlap_t mayOverlap) {
@@ -912,59 +1104,6 @@ RValue CIRGenFunction::emitCXXPseudoDestructorExpr(
   return RValue::get(nullptr);
 }
 
-/// Emit a call to an operator new or operator delete function, as implicitly
-/// created by new-expressions and delete-expressions.
-static RValue emitNewDeleteCall(CIRGenFunction &cgf,
-                                const FunctionDecl *calleeDecl,
-                                const FunctionProtoType *calleeType,
-                                const CallArgList &args) {
-  cir::CIRCallOpInterface callOrTryCall;
-  cir::FuncOp calleePtr = cgf.cgm.getAddrOfFunction(calleeDecl);
-  CIRGenCallee callee =
-      CIRGenCallee::forDirect(calleePtr, GlobalDecl(calleeDecl));
-  RValue rv =
-      cgf.emitCall(cgf.cgm.getTypes().arrangeFreeFunctionCall(args, 
calleeType),
-                   callee, ReturnValueSlot(), args, &callOrTryCall);
-
-  /// C++1y [expr.new]p10:
-  ///   [In a new-expression,] an implementation is allowed to omit a call
-  ///   to a replaceable global allocation function.
-  ///
-  /// We model such elidable calls with the 'builtin' attribute.
-  assert(!cir::MissingFeatures::attributeBuiltin());
-  return rv;
-}
-
-RValue CIRGenFunction::emitNewOrDeleteBuiltinCall(const FunctionProtoType 
*type,
-                                                  const CallExpr *callExpr,
-                                                  OverloadedOperatorKind op) {
-  CallArgList args;
-  emitCallArgs(args, type, callExpr->arguments());
-  // Find the allocation or deallocation function that we're calling.
-  ASTContext &astContext = getContext();
-  assert(op == OO_New || op == OO_Delete);
-  DeclarationName name = astContext.DeclarationNames.getCXXOperatorName(op);
-
-  clang::DeclContextLookupResult lookupResult =
-      astContext.getTranslationUnitDecl()->lookup(name);
-  for (const auto *decl : lookupResult) {
-    if (const auto *funcDecl = dyn_cast<FunctionDecl>(decl)) {
-      if (astContext.hasSameType(funcDecl->getType(), QualType(type, 0))) {
-        if (sanOpts.has(SanitizerKind::AllocToken)) {
-          // TODO: Set !alloc_token metadata.
-          assert(!cir::MissingFeatures::allocToken());
-          cgm.errorNYI("Alloc token sanitizer not yet supported!");
-        }
-
-        // Emit the call to operator new/delete.
-        return emitNewDeleteCall(*this, funcDecl, type, args);
-      }
-    }
-  }
-
-  llvm_unreachable("predeclared global operator new/delete is missing");
-}
-
 namespace {
 /// Calls the given 'operator delete' on a single object.
 struct CallObjectDelete final : EHScopeStack::Cleanup {
@@ -1190,10 +1329,28 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const 
CXXNewExpr *e) {
     cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: null check");
 
   // If there's an operator delete, enter a cleanup to call it if an
-  // exception is thrown.
-  if (e->getOperatorDelete() &&
-      !e->getOperatorDelete()->isReservedGlobalPlacementOperator())
-    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: operator delete");
+  // exception is thrown. If we do this, we'll be creating the result pointer
+  // inside a cleanup scope, either with a bitcast or an offset based on the
+  // array cookie size. However, we need to return that pointer from outside
+  // the cleanup scope, so we need to store it in a temporary variable.
+  bool useNewDeleteCleanup =
+      e->getOperatorDelete() &&
+      !e->getOperatorDelete()->isReservedGlobalPlacementOperator();
+  // These variables are only used if we use the new delete cleanup.
+  mlir::OpBuilder::InsertPoint beforeNewDeleteCleanup;
+  EHScopeStack::stable_iterator operatorDeleteCleanup;
+  Address resultPtr = Address::invalid();
+  mlir::Operation *cleanupDominator = nullptr;
+  if (useNewDeleteCleanup) {
+    beforeNewDeleteCleanup = builder.saveInsertionPoint();
+    assert(!cir::MissingFeatures::typeAwareAllocation());
+    enterNewDeleteCleanup(*this, e, allocation, allocSize, allocAlign,
+                          allocatorArgs);
+    operatorDeleteCleanup = ehStack.stable_begin();
+    cleanupDominator =
+        cir::UnreachableOp::create(builder, getLoc(e->getSourceRange()))
+            .getOperation();
+  }
 
   if (allocSize != allocSizeWithoutCookie) {
     assert(e->isArray());
@@ -1212,6 +1369,15 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const 
CXXNewExpr *e) {
   Address result = builder.createElementBitCast(getLoc(e->getSourceRange()),
                                                 allocation, elementTy);
 
+  // If we're inside a new delete cleanup, store the result pointer.
+  if (useNewDeleteCleanup) {
+    resultPtr =
+        createTempAlloca(builder.getPointerTo(elementTy), 
result.getAlignment(),
+                         getLoc(e->getSourceRange()), "__new_result");
+    builder.createStore(getLoc(e->getSourceRange()), result.getPointer(),
+                        resultPtr);
+  }
+
   // Passing pointer through launder.invariant.group to avoid propagation of
   // vptrs information which may be included in previous type.
   // To not break LTO with different optimizations levels, we do it regardless
@@ -1224,6 +1390,21 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const 
CXXNewExpr *e) {
 
   emitNewInitializer(*this, e, allocType, elementTy, result, numElements,
                      allocSizeWithoutCookie);
+
+  // Deactivate the 'operator delete' cleanup if we finished
+  // initialization.
+  if (useNewDeleteCleanup) {
+    assert(operatorDeleteCleanup.isValid());
+    assert(resultPtr.isValid());
+    deactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
+    cleanupDominator->erase();
+    cir::LoadOp loadResult =
+        builder.createLoad(getLoc(e->getSourceRange()), resultPtr);
+    result = result.withPointer(loadResult.getResult());
+  }
+
+  assert(!cir::MissingFeatures::exprNewNullCheck());
+
   return result.getPointer();
 }
 
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h 
b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 56bdcfd7f8906..bbcffe0db87a3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -970,6 +970,16 @@ class CIRGenFunction : public CIRGenTypeCache {
                         ArrayRef<mlir::Value *> valuesToReload = {});
   void popCleanupBlock();
 
+  /// Deactivates the given cleanup block. The block cannot be reactivated. 
Pops
+  /// it if it's the top of the stack.
+  ///
+  /// \param dominatingIP - An instruction which is known to
+  ///   dominate the current IP (if set) and which lies along
+  ///   all paths of execution between the current IP and
+  ///   the point at which the cleanup comes into scope.
+  void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup,
+                              mlir::Operation *dominatingIP);
+
   /// Push a cleanup to be run at the end of the current full-expression.  Safe
   /// against the possibility that we're currently inside a
   /// conditionally-evaluated expression.
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h 
b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 9d614c858dbe1..09b78820a2587 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -187,6 +187,25 @@ class EHScopeStack {
     [[maybe_unused]] Cleanup *obj = new (buffer) T(a...);
   }
 
+  /// Push a cleanup with non-constant storage requirements on the
+  /// stack.  The cleanup type must provide an additional static method:
+  ///   static size_t getExtraSize(size_t);
+  /// The argument to this method will be the value N, which will also
+  /// be passed as the first argument to the constructor.
+  ///
+  /// The data stored in the extra storage must obey the same
+  /// restrictions as normal cleanup member data.
+  ///
+  /// The pointer returned from this method is valid until the cleanup
+  /// stack is modified.
+  template <class T, class... As>
+  T *pushCleanupWithExtra(CleanupKind kind, size_t n, As... a) {
+    static_assert(alignof(T) <= ScopeStackAlignment,
+                  "Cleanup's alignment is too large.");
+    void *buffer = pushCleanup(kind, sizeof(T) + T::getExtraSize(n));
+    return new (buffer) T(n, a...);
+  }
+
   void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
 
   /// Pops a cleanup scope off the stack.  This is private to 
CIRGenCleanup.cpp.
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp 
b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index 2599125c5bb4a..6f6d2f0a82916 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -2563,14 +2563,15 @@ void cir::TernaryOp::build(
 
   // Get result type from whichever branch has a yield (the other may have
   // unreachable from a throw expression)
-  auto yield =
-      dyn_cast_or_null<cir::YieldOp>(trueRegion->back().getTerminator());
-  if (!yield)
+  cir::YieldOp yield;
+  if (trueRegion->back().mightHaveTerminator())
+    yield = dyn_cast_or_null<cir::YieldOp>(trueRegion->back().getTerminator());
+  if (!yield && falseRegion->back().mightHaveTerminator())
     yield = 
dyn_cast_or_null<cir::YieldOp>(falseRegion->back().getTerminator());
 
-  assert((yield && yield.getNumOperands() <= 1) &&
+  assert((!yield || yield.getNumOperands() <= 1) &&
          "expected zero or one result type");
-  if (yield.getNumOperands() == 1)
+  if (yield && yield.getNumOperands() == 1)
     result.addTypes(TypeRange{yield.getOperandTypes().front()});
 }
 
diff --git a/clang/test/CIR/CodeGen/new-delete.cpp 
b/clang/test/CIR/CodeGen/new-delete.cpp
new file mode 100644
index 0000000000000..58db8f8646f4c
--- /dev/null
+++ b/clang/test/CIR/CodeGen/new-delete.cpp
@@ -0,0 +1,164 @@
+// RUN: %clang_cc1 -no-enable-noundef-analysis %s -triple=x86_64-linux-gnu 
-fclangir -emit-cir -std=c++98 -fcxx-exceptions -fexceptions -o %t.cir
+// RUN: FileCheck -check-prefixes=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -no-enable-noundef-analysis %s -triple=x86_64-linux-gnu 
-fclangir -emit-llvm -std=c++98 -fcxx-exceptions -fexceptions -o %t-cir.ll
+// RUN: FileCheck -check-prefixes=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -no-enable-noundef-analysis %s -triple=x86_64-linux-gnu 
-emit-llvm -std=c++98 -fcxx-exceptions -fexceptions -o %t.ll
+// RUN: FileCheck -check-prefixes=OGCG --input-file=%t.ll %s
+
+
+struct A { A(int); ~A(); void *p; };
+
+A *a() {
+  return new A(5);
+}
+
+// CIR: cir.func {{.*}} @_Z1av() -> !cir.ptr<!rec_A> {
+// CIR:   %[[RETVAL:.*]] = cir.alloca !cir.ptr<!rec_A>, 
!cir.ptr<!cir.ptr<!rec_A>>, ["__retval"]
+// CIR:   %[[NEW_RESULT:.*]] = cir.alloca !cir.ptr<!rec_A>, 
!cir.ptr<!cir.ptr<!rec_A>>, ["__new_result"]
+// CIR:   %[[ALLOC_SIZE:.*]] = cir.const #cir.int<8> : !u64i
+// CIR:   %[[PTR:.*]] = cir.call @_Znwm(%[[ALLOC_SIZE]])
+// CIR:   cir.cleanup.scope {
+// CIR:     %[[PTR_A:.*]] = cir.cast bitcast %[[PTR]] : !cir.ptr<!void> -> 
!cir.ptr<!rec_A>
+// CIR:     cir.store{{.*}} %[[PTR_A]], %[[NEW_RESULT]] : !cir.ptr<!rec_A>, 
!cir.ptr<!cir.ptr<!rec_A>>
+// CIR:     %[[FIVE:.*]] = cir.const #cir.int<5> : !s32i
+// CIR:     cir.call @_ZN1AC1Ei(%[[PTR_A]], %[[FIVE]])
+// CIR:     cir.yield
+// CIR:   } cleanup  eh {
+// CIR:     cir.call @_ZdlPv(%[[PTR]]) nothrow : (!cir.ptr<!void>) -> ()
+// CIR:     cir.yield
+// CIR:   }
+
+// LLVM: define {{.*}} ptr @_Z1av() {{.*}} personality ptr 
@__gxx_personality_v0 {
+// LLVM:   %[[RETVAL:.*]] = alloca ptr
+// LLVM:   %[[NEW_RESULT:.*]] = alloca ptr
+// LLVM:   %[[PTR:.*]] = call ptr @_Znwm(i64 8)
+// LLVM:   br label %[[EH_SCOPE:.*]]
+// LLVM: [[EH_SCOPE]]:
+// LLVM:   store ptr %[[PTR]], ptr %[[NEW_RESULT]]
+// LLVM:   invoke void @_ZN1AC1Ei(ptr %[[PTR]], i32 5)
+// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[UNWIND:.*]]
+// LLVM: [[INVOKE_CONT]]:
+// LLVM:   br label %[[EH_SCOPE_END:.*]]
+// LLVM: [[UNWIND]]:
+// LLVM:   %[[EXN:.*]] = landingpad { ptr, i32 }
+// LLVM:          cleanup
+// LLVM:   %[[EXN_PTR:.*]] = extractvalue { ptr, i32 } %[[EXN]], 0
+// LLVM:   %[[TYPEID:.*]] = extractvalue { ptr, i32 } %[[EXN]], 1
+// LLVM:   br label %[[EH_CLEANUP:.*]]
+// LLVM: [[EH_CLEANUP]]:
+// LLVM:   %[[EXN_PTR_PHI:.*]] = phi ptr [ %[[EXN_PTR]], %[[UNWIND]] ]
+// LLVM:   %[[TYPEID_PHI:.*]] = phi i32 [ %[[TYPEID]], %[[UNWIND]] ]
+// LLVM:   call void @_ZdlPv(ptr %[[PTR]])
+// LLVM:   %[[EXN_INSERT:.*]] = insertvalue { ptr, i32 } poison, ptr 
%[[EXN_PTR_PHI]], 0
+// LLVM:   %[[EXN_INSERT_2:.*]] = insertvalue { ptr, i32 } %[[EXN_INSERT]], 
i32 %[[TYPEID_PHI]], 1
+// LLVM:   resume { ptr, i32 } %[[EXN_INSERT_2]]
+// LLVM: [[EH_SCOPE_END]]:
+// LLVM:   %[[LOAD:.*]] = load ptr, ptr %[[NEW_RESULT]]
+// LLVM:   store ptr %[[LOAD]], ptr %[[RETVAL]]
+// LLVM:   %[[RET:.*]] = load ptr, ptr %[[RETVAL]]
+// LLVM:   ret ptr %[[RET]]
+
+// OGCG: define {{.*}} ptr @_Z1av() {{.*}} personality ptr 
@__gxx_personality_v0 {
+// OGCG:   %[[EXN_SLOT:.*]] = alloca ptr
+// OGCG:   %[[EHSELECTOR_SLOT:.*]] = alloca i32
+// OGCG:   %[[PTR:.*]] = call {{.*}} ptr @_Znwm(i64 8)
+// OGCG:   invoke void @_ZN1AC1Ei(ptr {{.*}} %[[PTR]], i32 5)
+// OGCG:           to label %[[INVOKE_CONT:.*]] unwind label %[[UNWIND:.*]]
+// OGCG: [[INVOKE_CONT]]:
+// OGCG:   ret ptr %[[PTR]]
+// OGCG: [[UNWIND]]:
+// OGCG:   %[[EXN:.*]] = landingpad { ptr, i32 }
+// OGCG:          cleanup
+// OGCG:   %[[EXN_PTR:.*]] = extractvalue { ptr, i32 } %[[EXN]], 0
+// OGCG:   store ptr %[[EXN_PTR]], ptr %[[EXN_SLOT]]
+// OGCG:   %[[TYPEID:.*]] = extractvalue { ptr, i32 } %[[EXN]], 1
+// OGCG:   store i32 %[[TYPEID]], ptr %[[EHSELECTOR_SLOT]]
+// OGCG:   call void @_ZdlPv(ptr %[[PTR]])
+// OGCG:   br label %[[EH_RESUME:.*]]
+// OGCG: [[EH_RESUME]]:
+// OGCG:   %[[EXN_PTR:.*]] = load ptr, ptr %[[EXN_SLOT]]
+// OGCG:   %[[EHSELECTOR:.*]] = load i32, ptr %[[EHSELECTOR_SLOT]]
+// OGCG:   %[[EXN_INSERT:.*]] = insertvalue { ptr, i32 } poison, ptr 
%[[EXN_PTR]], 0
+// OGCG:   %[[EXN_INSERT_2:.*]] = insertvalue { ptr, i32 } %[[EXN_INSERT]], 
i32 %[[EHSELECTOR]], 1
+// OGCG:   resume { ptr, i32 } %[[EXN_INSERT_2]]
+
+A *b() {
+  extern int foo();
+  return new A(foo());
+}
+
+// CIR: cir.func {{.*}} @_Z1bv() -> !cir.ptr<!rec_A> {
+// CIR:   %[[RETVAL:.*]] = cir.alloca !cir.ptr<!rec_A>, 
!cir.ptr<!cir.ptr<!rec_A>>, ["__retval"]
+// CIR:   %[[NEW_RESULT:.*]] = cir.alloca !cir.ptr<!rec_A>, 
!cir.ptr<!cir.ptr<!rec_A>>, ["__new_result"]
+// CIR:   %[[ALLOC_SIZE:.*]] = cir.const #cir.int<8> : !u64i
+// CIR:   %[[PTR:.*]] = cir.call @_Znwm(%[[ALLOC_SIZE]])
+// CIR:   cir.cleanup.scope {
+// CIR:     %[[PTR_A:.*]] = cir.cast bitcast %[[PTR]] : !cir.ptr<!void> -> 
!cir.ptr<!rec_A>
+// CIR:     cir.store{{.*}} %[[PTR_A]], %[[NEW_RESULT]] : !cir.ptr<!rec_A>, 
!cir.ptr<!cir.ptr<!rec_A>>
+// CIR:     %[[FOO:.*]] = cir.call @_Z3foov() : () -> !s32i
+// CIR:     cir.call @_ZN1AC1Ei(%[[PTR_A]], %[[FOO]])
+// CIR:     cir.yield
+// CIR:   } cleanup  eh {
+// CIR:     cir.call @_ZdlPv(%[[PTR]]) nothrow : (!cir.ptr<!void>) -> ()
+// CIR:     cir.yield
+// CIR:   }
+
+// LLVM: define {{.*}} ptr @_Z1bv() {{.*}} personality ptr 
@__gxx_personality_v0 {
+// LLVM:   %[[RETVAL:.*]] = alloca ptr
+// LLVM:   %[[NEW_RESULT:.*]] = alloca ptr
+// LLVM:   %[[PTR:.*]] = call ptr @_Znwm(i64 8)
+// LLVM:   br label %[[EH_SCOPE:.*]]
+// LLVM: [[EH_SCOPE]]:
+// LLVM:   store ptr %[[PTR]], ptr %[[NEW_RESULT]]
+// LLVM:   %[[FOO:.*]] = invoke i32 @_Z3foov()
+// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[UNWIND:.*]]
+// LLVM: [[INVOKE_CONT]]:
+// LLVM:   invoke void @_ZN1AC1Ei(ptr %[[PTR]], i32 %[[FOO]])
+// LLVM:           to label %[[INVOKE_CONT_2:.*]] unwind label %[[UNWIND:.*]]
+// LLVM: [[INVOKE_CONT_2]]:
+// LLVM:   br label %[[EH_SCOPE_END:.*]]
+// LLVM: [[UNWIND]]:
+// LLVM:   %[[EXN:.*]] = landingpad { ptr, i32 }
+// LLVM:          cleanup
+// LLVM:   %[[EXN_PTR:.*]] = extractvalue { ptr, i32 } %[[EXN]], 0
+// LLVM:   %[[TYPEID:.*]] = extractvalue { ptr, i32 } %[[EXN]], 1
+// LLVM:   br label %[[EH_CLEANUP:.*]]
+// LLVM: [[EH_CLEANUP]]:
+// LLVM:   %[[EXN_PTR_PHI:.*]] = phi ptr [ %[[EXN_PTR]], %[[UNWIND]] ]
+// LLVM:   %[[TYPEID_PHI:.*]] = phi i32 [ %[[TYPEID]], %[[UNWIND]] ]
+// LLVM:   call void @_ZdlPv(ptr %[[PTR]])
+// LLVM:   %[[EXN_INSERT:.*]] = insertvalue { ptr, i32 } poison, ptr 
%[[EXN_PTR_PHI]], 0
+// LLVM:   %[[EXN_INSERT_2:.*]] = insertvalue { ptr, i32 } %[[EXN_INSERT]], 
i32 %[[TYPEID_PHI]], 1
+// LLVM:   resume { ptr, i32 } %[[EXN_INSERT_2]]
+// LLVM: [[EH_SCOPE_END]]:
+// LLVM:   %[[LOAD:.*]] = load ptr, ptr %[[NEW_RESULT]]
+// LLVM:   store ptr %[[LOAD]], ptr %[[RETVAL]]
+// LLVM:   %[[RET:.*]] = load ptr, ptr %[[RETVAL]]
+// LLVM:   ret ptr %[[RET]]
+
+// OGCG: define {{.*}} ptr @_Z1bv() {{.*}} personality ptr 
@__gxx_personality_v0 {
+// OGCG:   %[[EXN_SLOT:.*]] = alloca ptr
+// OGCG:   %[[EHSELECTOR_SLOT:.*]] = alloca i32
+// OGCG:   %[[PTR:.*]] = call {{.*}} ptr @_Znwm(i64 8)
+// OGCG:   %[[FOO:.*]] = invoke i32 @_Z3foov()
+// OGCG:           to label %[[INVOKE_CONT:.*]] unwind label %[[UNWIND:.*]]
+// OGCG: [[INVOKE_CONT]]:
+// OGCG:   invoke void @_ZN1AC1Ei(ptr {{.*}} %[[PTR]], i32 %[[FOO]])
+// OGCG:           to label %[[INVOKE_CONT_2:.*]] unwind label %[[UNWIND:.*]]
+// OGCG: [[INVOKE_CONT_2]]:
+// OGCG:   ret ptr %[[PTR]]
+// OGCG: [[UNWIND]]:
+// OGCG:   %[[EXN:.*]] = landingpad { ptr, i32 }
+// OGCG:          cleanup
+// OGCG:   %[[EXN_PTR:.*]] = extractvalue { ptr, i32 } %[[EXN]], 0
+// OGCG:   store ptr %[[EXN_PTR]], ptr %[[EXN_SLOT]]
+// OGCG:   %[[TYPEID:.*]] = extractvalue { ptr, i32 } %[[EXN]], 1
+// OGCG:   store i32 %[[TYPEID]], ptr %[[EHSELECTOR_SLOT]]
+// OGCG:   call void @_ZdlPv(ptr %[[PTR]])
+// OGCG:   br label %[[EH_RESUME:.*]]
+// OGCG: [[EH_RESUME]]:
+// OGCG:   %[[EXN_PTR:.*]] = load ptr, ptr %[[EXN_SLOT]]
+// OGCG:   %[[EHSELECTOR:.*]] = load i32, ptr %[[EHSELECTOR_SLOT]]
+// OGCG:   %[[EXN_INSERT:.*]] = insertvalue { ptr, i32 } poison, ptr 
%[[EXN_PTR]], 0
+// OGCG:   %[[EXN_INSERT_2:.*]] = insertvalue { ptr, i32 } %[[EXN_INSERT]], 
i32 %[[EHSELECTOR]], 1
+// OGCG:   resume { ptr, i32 } %[[EXN_INSERT_2]]

>From f20c176f1915e564aa8b7ac942da4cbbbdd1384e Mon Sep 17 00:00:00 2001
From: Andy Kaylor <[email protected]>
Date: Thu, 5 Mar 2026 10:50:37 -0800
Subject: [PATCH 2/6] Address review feedback -- simple refactoring

---
 clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 11 ++++-------
 clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 11 +++++------
 2 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 9fddc0eb6ebcf..6710e22a64a9f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -682,9 +682,9 @@ RValue CIRGenFunction::emitNewOrDeleteBuiltinCall(const 
FunctionProtoType *type,
 
   clang::DeclContextLookupResult lookupResult =
       astContext.getTranslationUnitDecl()->lookup(name);
-  for (const auto *decl : lookupResult) {
+  for (const NamedDecl *decl : lookupResult) {
     if (const auto *funcDecl = dyn_cast<FunctionDecl>(decl)) {
-      if (astContext.hasSameType(funcDecl->getType(), QualType(type, 0))) {
+      if (astContext.hasSameType(funcDecl->getType().getTypePtr(), type)) {
         if (sanOpts.has(SanitizerKind::AllocToken)) {
           // TODO: Set !alloc_token metadata.
           assert(!cir::MissingFeatures::allocToken());
@@ -1329,20 +1329,16 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const 
CXXNewExpr *e) {
     cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: null check");
 
   // If there's an operator delete, enter a cleanup to call it if an
-  // exception is thrown. If we do this, we'l be creating the result pointer
+  // exception is thrown. If we do this, we'll be creating the result pointer
   // inside a cleanup scope, either with a bitcast or an offset based on the
   // array cookie size. However, we need to return that pointer from outside
   // the cleanup scope, so we need to store it in a temporary variable.
   bool useNewDeleteCleanup =
       e->getOperatorDelete() &&
       !e->getOperatorDelete()->isReservedGlobalPlacementOperator();
-  // These variables are only used if we use the new delete cleanup.
-  mlir::OpBuilder::InsertPoint beforeNewDeleteCleanup;
   EHScopeStack::stable_iterator operatorDeleteCleanup;
-  Address resultPtr = Address::invalid();
   mlir::Operation *cleanupDominator = nullptr;
   if (useNewDeleteCleanup) {
-    beforeNewDeleteCleanup = builder.saveInsertionPoint();
     assert(!cir::MissingFeatures::typeAwareAllocation());
     enterNewDeleteCleanup(*this, e, allocation, allocSize, allocAlign,
                           allocatorArgs);
@@ -1370,6 +1366,7 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const 
CXXNewExpr *e) {
                                                 allocation, elementTy);
 
   // If we're inside a new delete cleanup, store the result pointer.
+  Address resultPtr = Address::invalid();
   if (useNewDeleteCleanup) {
     resultPtr =
         createTempAlloca(builder.getPointerTo(elementTy), 
result.getAlignment(),
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp 
b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index 6f6d2f0a82916..2599125c5bb4a 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -2563,15 +2563,14 @@ void cir::TernaryOp::build(
 
   // Get result type from whichever branch has a yield (the other may have
   // unreachable from a throw expression)
-  cir::YieldOp yield;
-  if (trueRegion->back().mightHaveTerminator())
-    yield = dyn_cast_or_null<cir::YieldOp>(trueRegion->back().getTerminator());
-  if (!yield && falseRegion->back().mightHaveTerminator())
+  auto yield =
+      dyn_cast_or_null<cir::YieldOp>(trueRegion->back().getTerminator());
+  if (!yield)
     yield = 
dyn_cast_or_null<cir::YieldOp>(falseRegion->back().getTerminator());
 
-  assert((!yield || yield.getNumOperands() <= 1) &&
+  assert((yield && yield.getNumOperands() <= 1) &&
          "expected zero or one result type");
-  if (yield && yield.getNumOperands() == 1)
+  if (yield.getNumOperands() == 1)
     result.addTypes(TypeRange{yield.getOperandTypes().front()});
 }
 

>From cab7182b0433d1004b833729780f88a4e5b64810 Mon Sep 17 00:00:00 2001
From: Andy Kaylor <[email protected]>
Date: Thu, 5 Mar 2026 11:49:44 -0800
Subject: [PATCH 3/6] Use TrailingObjects for placement argument handling

---
 clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 28 ++++++++++++++++++-------
 1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 6710e22a64a9f..59c3ff4cf7fab 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -20,6 +20,7 @@
 #include "clang/AST/ExprObjC.h"
 #include "clang/Basic/OperatorKinds.h"
 #include "clang/CIR/MissingFeatures.h"
+#include "llvm/Support/TrailingObjects.h"
 
 using namespace clang;
 using namespace clang::CIRGen;
@@ -701,19 +702,29 @@ RValue CIRGenFunction::emitNewOrDeleteBuiltinCall(const 
FunctionProtoType *type,
 }
 
 namespace {
+template <typename Traits>
+struct PlacementArg {
+  typename Traits::RValueTy argValue;
+  QualType argType;
+};
+
 /// A cleanup to call the given 'operator delete' function upon abnormal
 /// exit from a new expression. Templated on a traits type that deals with
 /// ensuring that the arguments dominate the cleanup if necessary.
 template <typename Traits>
-class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
+class CallDeleteDuringNew final
+    : public EHScopeStack::Cleanup,
+      private llvm::TrailingObjects<CallDeleteDuringNew<Traits>,
+                                    PlacementArg<Traits>> {
+  using TrailingObj =
+      llvm::TrailingObjects<CallDeleteDuringNew<Traits>, PlacementArg<Traits>>;
+  friend TrailingObj;
+  using TrailingObj::getTrailingObjects;
+
   /// Type used to hold llvm::Value*s.
   typedef typename Traits::ValueTy ValueTy;
   /// Type used to hold RValues.
   typedef typename Traits::RValueTy RValueTy;
-  struct PlacementArg {
-    RValueTy argValue;
-    QualType argType;
-  };
 
   unsigned numPlacementArgs : 30;
   LLVM_PREFERRED_TYPE(AlignedAllocationMode)
@@ -723,13 +734,14 @@ class CallDeleteDuringNew final : public 
EHScopeStack::Cleanup {
   ValueTy allocSize;
   CharUnits allocAlign;
 
-  PlacementArg *getPlacementArgs() {
-    return reinterpret_cast<PlacementArg *>(this + 1);
+  PlacementArg<Traits> *getPlacementArgs() {
+    return getTrailingObjects();
   }
 
 public:
   static size_t getExtraSize(size_t numPlacementArgs) {
-    return numPlacementArgs * sizeof(PlacementArg);
+    return TrailingObj::template additionalSizeToAlloc<PlacementArg<Traits>>(
+        numPlacementArgs);
   }
 
   CallDeleteDuringNew(size_t numPlacementArgs,

>From 2fab0af7af03ef6cf24029e067e08d300f218f0a Mon Sep 17 00:00:00 2001
From: Andy Kaylor <[email protected]>
Date: Thu, 5 Mar 2026 12:05:14 -0800
Subject: [PATCH 4/6] Move placement arg initialization to CallDeleteDuringNew
 ctor

---
 clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 59c3ff4cf7fab..c5d57ad3f539c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -748,11 +748,18 @@ class CallDeleteDuringNew final
                       const FunctionDecl *operatorDelete, ValueTy ptr,
                       ValueTy allocSize,
                       const ImplicitAllocationParameters &iap,
-                      CharUnits allocAlign)
+                      CharUnits allocAlign, const CallArgList *newArgs,
+                      unsigned numNonPlacementArgs, CIRGenFunction *cgf,
+                      mlir::Location loc)
       : numPlacementArgs(numPlacementArgs),
         passAlignmentToPlacementDelete(isAlignedAllocation(iap.PassAlignment)),
         operatorDelete(operatorDelete), ptr(ptr), allocSize(allocSize),
-        allocAlign(allocAlign) {}
+        allocAlign(allocAlign) {
+    for (unsigned i = 0, n = numPlacementArgs; i != n; ++i) {
+      const CallArg &arg = (*newArgs)[i + numNonPlacementArgs];
+      setPlacementArg(i, arg.getRValue(*cgf, loc), arg.ty);
+    }
+  }
 
   void setPlacementArg(unsigned i, RValueTy argValue, QualType argType) {
     assert(i < numPlacementArgs && "index out of range");
@@ -834,15 +841,11 @@ static void enterNewDeleteCleanup(CIRGenFunction &cgf, 
const CXXNewExpr *e,
     typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
 
     assert(!cir::MissingFeatures::typeAwareAllocation());
-    DirectCleanup *cleanup = cgf.ehStack.pushCleanupWithExtra<DirectCleanup>(
+    cgf.ehStack.pushCleanupWithExtra<DirectCleanup>(
         EHCleanup, e->getNumPlacementArgs(), e->getOperatorDelete(),
         newPtr.getPointer(), allocSize, e->implicitAllocationParameters(),
-        allocAlign);
-    for (unsigned i = 0, n = e->getNumPlacementArgs(); i != n; ++i) {
-      const CallArg &arg = newArgs[i + numNonPlacementArgs];
-      cleanup->setPlacementArg(
-          i, arg.getRValue(cgf, cgf.getLoc(e->getSourceRange())), arg.ty);
-    }
+        allocAlign, &newArgs, numNonPlacementArgs, &cgf,
+        cgf.getLoc(e->getSourceRange()));
 
     return;
   }

>From 3a0c97f8138062786533e8075fc8ad0537ceafaa Mon Sep 17 00:00:00 2001
From: Andy Kaylor <[email protected]>
Date: Thu, 5 Mar 2026 12:44:52 -0800
Subject: [PATCH 5/6] Make setPlacementArg private

---
 clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index c5d57ad3f539c..61f61514a3d4f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -738,6 +738,11 @@ class CallDeleteDuringNew final
     return getTrailingObjects();
   }
 
+  void setPlacementArg(unsigned i, RValueTy argValue, QualType argType) {
+    assert(i < numPlacementArgs && "index out of range");
+    getPlacementArgs()[i] = {argValue, argType};
+  }
+
 public:
   static size_t getExtraSize(size_t numPlacementArgs) {
     return TrailingObj::template additionalSizeToAlloc<PlacementArg<Traits>>(
@@ -761,11 +766,6 @@ class CallDeleteDuringNew final
     }
   }
 
-  void setPlacementArg(unsigned i, RValueTy argValue, QualType argType) {
-    assert(i < numPlacementArgs && "index out of range");
-    getPlacementArgs()[i] = {argValue, argType};
-  }
-
   void emit(CIRGenFunction &cgf, Flags flags) override {
     const auto *fpt = operatorDelete->getType()->castAs<FunctionProtoType>();
     CallArgList deleteArgs;

>From 57c3a6a54b7e2fc10202feb5ab203a17b1716df1 Mon Sep 17 00:00:00 2001
From: Andy Kaylor <[email protected]>
Date: Thu, 5 Mar 2026 12:48:39 -0800
Subject: [PATCH 6/6] Fix formatting

---
 clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 61f61514a3d4f..50df96cc115a8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -702,8 +702,7 @@ RValue CIRGenFunction::emitNewOrDeleteBuiltinCall(const 
FunctionProtoType *type,
 }
 
 namespace {
-template <typename Traits>
-struct PlacementArg {
+template <typename Traits> struct PlacementArg {
   typename Traits::RValueTy argValue;
   QualType argType;
 };
@@ -734,9 +733,7 @@ class CallDeleteDuringNew final
   ValueTy allocSize;
   CharUnits allocAlign;
 
-  PlacementArg<Traits> *getPlacementArgs() {
-    return getTrailingObjects();
-  }
+  PlacementArg<Traits> *getPlacementArgs() { return getTrailingObjects(); }
 
   void setPlacementArg(unsigned i, RValueTy argValue, QualType argType) {
     assert(i < numPlacementArgs && "index out of range");

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to