https://github.com/banach-space created https://github.com/llvm/llvm-project/pull/175575

This change extends the NYI checks recently introduced in #174433 by
adding further validation of SVE builtin type modifiers. For example:

```cpp
SVETypeFlags typeFlags(builtinIntrInfo->typeModifier);

// Unsupported flag: report the builtin as not-yet-implemented and bail out.
if (typeFlags.someFlag())
  cgm.errorNYI(expr->getSourceRange(),
               std::string("unimplemented AArch64 builtin call: ") +
                   getContext().BuiltinInfo.getName(builtinID));
```

The newly added checks mirror the logic in CodeGen/TargetBuiltins/ARM.cpp, specifically in `CodeGenFunction::EmitAArch64SVEBuiltinExpr`, which defines the default code-generation path for SVE builtins; a rough sketch of that flow is shown below.
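
For orientation, the mirrored flow looks roughly like the sketch below. This is a simplified, from-memory approximation of the classic CodeGen path rather than an excerpt from it; in particular, the `SV_ALL` value and the `EmitSVEDupX` helper are recalled from the existing implementation and may differ in detail.

```cpp
// Simplified sketch (not verbatim) of the classic CodeGen handling that the
// new CIR NYI checks correspond to.
SVETypeFlags TypeFlags(Builtin->TypeModifier);

// Builtins that omit the predicate-pattern argument get an implicit SV_ALL.
if (TypeFlags.isAppendSVALL())
  Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));

// Intrinsics with an _n infix splat their scalar operand to a vector.
if (TypeFlags.hasSplatOperand()) {
  unsigned OpNo = TypeFlags.getSplatOperand();
  Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
}
```

In the CIR path each of these branches is, for now, an `errorNYI`; the plan is to replace them with real lowering in follow-up PRs.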

This change helps ensure CIR rejects unsupported cases early and provides a skeleton for future PRs that will add support for the missing builtins.
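
For illustration (not part of this patch's tests), a zeroing-predicated ACLE intrinsic such as `svabs_s32_z` should now be rejected with an `errorNYI` diagnostic by one of the new merge-type checks when compiled for an SVE target through the ClangIR pipeline, rather than proceeding down an unsupported path:

```cpp
// Hypothetical example: the _z (zeroing) suffix corresponds to a zeroing
// merge-type modifier, which this patch now diagnoses as NYI in CIR.
#include <arm_sve.h>

svint32_t zeroing_abs(svbool_t pg, svint32_t v) {
  return svabs_s32_z(pg, v);
}
```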


From 34da39ad34bc7f3acdf7e15086b836519f4fc095 Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <[email protected]>
Date: Mon, 12 Jan 2026 15:48:02 +0000
Subject: [PATCH] [CIR] Add additional NYI checks for SVE builtins
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This change extends the NYI checks recently introduced in #174433 by
adding further validation of SVE builtin type modifiers. For example:

```cpp
SVETypeFlags typeFlags(builtinIntrInfo->typeModifier);

// Unsupported flag: report the builtin as not-yet-implemented and bail out.
if (typeFlags.someFlag())
  cgm.errorNYI(expr->getSourceRange(),
               std::string("unimplemented AArch64 builtin call: ") +
                   getContext().BuiltinInfo.getName(builtinID));
```

The newly added checks mirror the logic in CodeGen/TargetBuiltins/ARM.cpp, specifically in `CodeGenFunction::EmitAArch64SVEBuiltinExpr`, which defines the default code-generation path for SVE builtins.

This change helps ensure CIR rejects unsupported cases early and provides a skeleton for future PRs that will add support for the missing builtins.
---
 .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp  | 91 ++++++++++++++++++-
 1 file changed, 88 insertions(+), 3 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 7998fb6b5eaac..93089eb585aa7 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -162,17 +162,102 @@ CIRGenFunction::emitAArch64SVEBuiltinExpr(unsigned builtinID,
 
   mlir::Location loc = getLoc(expr->getExprLoc());
 
+  // Handle built-ins for which there is a corresponding LLVM Intrinsic.
+  // -------------------------------------------------------------------
   if (builtinIntrInfo->llvmIntrinsic != 0) {
+    // Emit set FPMR for intrinsics that require it.
+    if (typeFlags.setsFPMR())
+      cgm.errorNYI(expr->getSourceRange(),
+                   std::string("unimplemented AArch64 builtin call: ") +
+                       getContext().BuiltinInfo.getName(builtinID));
+
+    if (typeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
+      cgm.errorNYI(expr->getSourceRange(),
+                   std::string("unimplemented AArch64 builtin call: ") +
+                       getContext().BuiltinInfo.getName(builtinID));
+
+    if (typeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
+      cgm.errorNYI(expr->getSourceRange(),
+                   std::string("unimplemented AArch64 builtin call: ") +
+                       getContext().BuiltinInfo.getName(builtinID));
+
+    // Some ACLE builtins leave out the argument to specify the predicate
+    // pattern, which is expected to be expanded to an SV_ALL pattern.
+    if (typeFlags.isAppendSVALL())
+      cgm.errorNYI(expr->getSourceRange(),
+                   std::string("unimplemented AArch64 builtin call: ") +
+                       getContext().BuiltinInfo.getName(builtinID));
+    if (typeFlags.isInsertOp1SVALL())
+      cgm.errorNYI(expr->getSourceRange(),
+                   std::string("unimplemented AArch64 builtin call: ") +
+                       getContext().BuiltinInfo.getName(builtinID));
+
+    // Predicates must match the main datatype.
+    for (mlir::Value &op : ops)
+      if (auto predTy = dyn_cast<mlir::VectorType>(op.getType()))
+        if (predTy.getElementType().isInteger(1))
+          cgm.errorNYI(expr->getSourceRange(),
+                       std::string("unimplemented AArch64 builtin call: ") +
+                           getContext().BuiltinInfo.getName(builtinID));
+
+    // Splat scalar operand to vector (intrinsics with _n infix)
+    if (typeFlags.hasSplatOperand()) {
+      cgm.errorNYI(expr->getSourceRange(),
+                   std::string("unimplemented AArch64 builtin call: ") +
+                       getContext().BuiltinInfo.getName(builtinID));
+    }
+
+    if (typeFlags.isReverseCompare())
+      cgm.errorNYI(expr->getSourceRange(),
+                   std::string("unimplemented AArch64 builtin call: ") +
+                       getContext().BuiltinInfo.getName(builtinID));
+    if (typeFlags.isReverseUSDOT())
+      cgm.errorNYI(expr->getSourceRange(),
+                   std::string("unimplemented AArch64 builtin call: ") +
+                       getContext().BuiltinInfo.getName(builtinID));
+    if (typeFlags.isReverseMergeAnyBinOp() &&
+        typeFlags.getMergeType() == SVETypeFlags::MergeAny)
+      cgm.errorNYI(expr->getSourceRange(),
+                   std::string("unimplemented AArch64 builtin call: ") +
+                       getContext().BuiltinInfo.getName(builtinID));
+    if (typeFlags.isReverseMergeAnyAccOp() &&
+        typeFlags.getMergeType() == SVETypeFlags::MergeAny)
+      cgm.errorNYI(expr->getSourceRange(),
+                   std::string("unimplemented AArch64 builtin call: ") +
+                       getContext().BuiltinInfo.getName(builtinID));
+
+    // Predicated intrinsics with _z suffix.
+    if (typeFlags.getMergeType() == SVETypeFlags::MergeZero) {
+      cgm.errorNYI(expr->getSourceRange(),
+                   std::string("unimplemented AArch64 builtin call: ") +
+                       getContext().BuiltinInfo.getName(builtinID));
+    }
+
     std::string llvmIntrName(Intrinsic::getBaseName(
         (llvm::Intrinsic::ID)builtinIntrInfo->llvmIntrinsic));
 
     llvmIntrName.erase(0, /*std::strlen(".llvm")=*/5);
 
-    return emitIntrinsicCallOp(builder, loc, llvmIntrName,
-                               convertType(expr->getType()),
-                               mlir::ValueRange{ops});
+    auto retTy = convertType(expr->getType());
+
+    auto call = emitIntrinsicCallOp(builder, loc, llvmIntrName, retTy,
+                                    mlir::ValueRange{ops});
+    if (call.getType() == retTy)
+      return call;
+
+    // Predicate results must be converted to svbool_t.
+    if (isa<mlir::VectorType>(retTy) &&
+        cast<mlir::VectorType>(retTy).isScalable())
+      cgm.errorNYI(expr->getSourceRange(),
+                   std::string("unimplemented AArch64 builtin call: ") +
+                       getContext().BuiltinInfo.getName(builtinID));
+    // TODO Handle struct types, e.g. svint8x2_t (update the converter first).
+
+    llvm_unreachable("unsupported element count!");
   }
 
+  // Handle the remaining built-ins.
+  // -------------------------------
   switch (builtinID) {
   default:
     return std::nullopt;
