https://github.com/mmha updated https://github.com/llvm/llvm-project/pull/132420
>From 594005c964b5c1e6605dc8ac170f1b43aa018bea Mon Sep 17 00:00:00 2001 From: Morris Hafner <mhaf...@nvidia.com> Date: Fri, 21 Mar 2025 17:55:00 +0100 Subject: [PATCH 1/3] [CIR] Add binary operators This patch adds upstream support for BinOp and BinOpOverflowOp including lvalue assignments and rudimentary support for pointer arithmetic. Note that this does not include ternary ops, ShiftOp and SelectOp, which are required for logical binary operators. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 114 ++- clang/include/clang/CIR/Dialect/IR/CIROps.td | 123 +++ clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 1 + clang/include/clang/CIR/MissingFeatures.h | 13 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 116 ++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 64 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 730 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 + clang/lib/CIR/CodeGen/CIRGenFunction.h | 30 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 41 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 10 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 243 +++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 33 + clang/test/CIR/CodeGen/binop.cpp | 33 + clang/test/CIR/Lowering/binop-bool.cir | 18 + clang/test/CIR/Lowering/binop-fp.cir | 68 ++ clang/test/CIR/Lowering/binop-overflow.cir | 63 ++ clang/test/CIR/Lowering/binop-signed-int.cir | 60 ++ .../test/CIR/Lowering/binop-unsigned-int.cir | 73 ++ 19 files changed, 1794 insertions(+), 41 deletions(-) create mode 100644 clang/test/CIR/CodeGen/binop.cpp create mode 100644 clang/test/CIR/Lowering/binop-bool.cir create mode 100644 clang/test/CIR/Lowering/binop-fp.cir create mode 100644 clang/test/CIR/Lowering/binop-overflow.cir create mode 100644 clang/test/CIR/Lowering/binop-signed-int.cir create mode 100644 clang/test/CIR/Lowering/binop-unsigned-int.cir diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index c6aea10d46b63..9fe80cde261a9 100644 --- 
a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -10,7 +10,6 @@ #define LLVM_CLANG_CIR_DIALECT_BUILDER_CIRBASEBUILDER_H #include "clang/AST/CharUnits.h" -#include "clang/AST/Type.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" @@ -28,6 +27,11 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { CIRBaseBuilderTy(mlir::MLIRContext &mlirContext) : mlir::OpBuilder(&mlirContext) {} + mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, + const llvm::APInt &val) { + return create<cir::ConstantOp>(loc, typ, getAttr<cir::IntAttr>(typ, val)); + } + cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr) { return create<cir::ConstantOp>(loc, attr.getType(), attr); } @@ -143,6 +147,114 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createCast(loc, cir::CastKind::bitcast, src, newTy); } + mlir::Value createBinop(mlir::Value lhs, cir::BinOpKind kind, + const llvm::APInt &rhs) { + return create<cir::BinOp>(lhs.getLoc(), lhs.getType(), kind, lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); + } + + mlir::Value createBinop(mlir::Value lhs, cir::BinOpKind kind, + mlir::Value rhs) { + return create<cir::BinOp>(lhs.getLoc(), lhs.getType(), kind, lhs, rhs); + } + + mlir::Value createBinop(mlir::Location loc, mlir::Value lhs, + cir::BinOpKind kind, mlir::Value rhs) { + return create<cir::BinOp>(loc, lhs.getType(), kind, lhs, rhs); + } + + mlir::Value createLowBitsSet(mlir::Location loc, unsigned size, + unsigned bits) { + llvm::APInt val = llvm::APInt::getLowBitsSet(size, bits); + auto type = cir::IntType::get(getContext(), size, false); + return getConstAPInt(loc, type, val); + } + + mlir::Value createAnd(mlir::Value lhs, const llvm::APInt &rhs) { + mlir::Value val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, cir::BinOpKind::And, val); + } + + 
mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, cir::BinOpKind::And, rhs); + } + + mlir::Value createAnd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) { + return createBinop(loc, lhs, cir::BinOpKind::And, rhs); + } + + mlir::Value createOr(mlir::Value lhs, const llvm::APInt &rhs) { + mlir::Value val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, cir::BinOpKind::Or, val); + } + + mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, cir::BinOpKind::Or, rhs); + } + + mlir::Value createMul(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, + bool hasNSW = false) { + auto op = create<cir::BinOp>(lhs.getLoc(), lhs.getType(), + cir::BinOpKind::Mul, lhs, rhs); + if (hasNUW) + op.setNoUnsignedWrap(true); + if (hasNSW) + op.setNoSignedWrap(true); + return op; + } + mlir::Value createNSWMul(mlir::Value lhs, mlir::Value rhs) { + return createMul(lhs, rhs, false, true); + } + mlir::Value createNUWAMul(mlir::Value lhs, mlir::Value rhs) { + return createMul(lhs, rhs, true, false); + } + + mlir::Value createMul(mlir::Value lhs, const llvm::APInt &rhs) { + mlir::Value val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, cir::BinOpKind::Mul, val); + } + + mlir::Value createSub(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, + bool hasNSW = false, bool saturated = false) { + auto op = create<cir::BinOp>(lhs.getLoc(), lhs.getType(), + cir::BinOpKind::Sub, lhs, rhs); + if (hasNUW) + op.setNoUnsignedWrap(true); + if (hasNSW) + op.setNoSignedWrap(true); + if (saturated) + op.setSaturated(true); + return op; + } + + mlir::Value createNSWSub(mlir::Value lhs, mlir::Value rhs) { + return createSub(lhs, rhs, false, true); + } + + mlir::Value createNUWSub(mlir::Value lhs, mlir::Value rhs) { + return createSub(lhs, rhs, true, false); + } + + mlir::Value createAdd(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, + bool hasNSW = false, bool saturated 
= false) { + auto op = create<cir::BinOp>(lhs.getLoc(), lhs.getType(), + cir::BinOpKind::Add, lhs, rhs); + if (hasNUW) + op.setNoUnsignedWrap(true); + if (hasNSW) + op.setNoSignedWrap(true); + if (saturated) + op.setSaturated(true); + return op; + } + + mlir::Value createNSWAdd(mlir::Value lhs, mlir::Value rhs) { + return createAdd(lhs, rhs, false, true); + } + mlir::Value createNUWAdd(mlir::Value lhs, mlir::Value rhs) { + return createAdd(lhs, rhs, true, false); + } + // // Block handling helpers // ---------------------- diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index d7d63e040a2ba..dca17e6cd2d2d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -826,6 +826,129 @@ def ForOp : CIR_Op<"for", [LoopOpInterface, NoRegionArguments]> { }]; } +//===----------------------------------------------------------------------===// +// BinOp +//===----------------------------------------------------------------------===// + +// FIXME: represent Commutative, Idempotent traits for appropriate binops +def BinOpKind_Mul : I32EnumAttrCase<"Mul", 1, "mul">; +def BinOpKind_Div : I32EnumAttrCase<"Div", 2, "div">; +def BinOpKind_Rem : I32EnumAttrCase<"Rem", 3, "rem">; +def BinOpKind_Add : I32EnumAttrCase<"Add", 4, "add">; +def BinOpKind_Sub : I32EnumAttrCase<"Sub", 5, "sub">; +def BinOpKind_And : I32EnumAttrCase<"And", 8, "and">; +def BinOpKind_Xor : I32EnumAttrCase<"Xor", 9, "xor">; +def BinOpKind_Or : I32EnumAttrCase<"Or", 10, "or">; +// TODO(cir): Do we need a min binop? +def BinOpKind_Max : I32EnumAttrCase<"Max", 11, "max">; + +def BinOpKind : I32EnumAttr< + "BinOpKind", + "binary operation (arith and logic) kind", + [BinOpKind_Mul, BinOpKind_Div, BinOpKind_Rem, + BinOpKind_Add, BinOpKind_Sub, + BinOpKind_And, BinOpKind_Xor, + BinOpKind_Or, BinOpKind_Max]> { + let cppNamespace = "::cir"; +} + +// FIXME: Pure won't work when we add overloading. 
+def BinOp : CIR_Op<"binop", [Pure, + SameTypeOperands, SameOperandsAndResultType]> { + + let summary = "Binary operations (arith and logic)"; + let description = [{ + cir.binop performs the binary operation according to + the specified opcode kind: [mul, div, rem, add, sub, + and, xor, or, max]. + + It requires two input operands and has one result, all types + should be the same. + + ```mlir + %7 = cir.binop(add, %1, %2) : !s32i + %7 = cir.binop(mul, %1, %2) : !u8i + ``` + }]; + + // TODO: get more accurate than CIR_AnyType + let results = (outs CIR_AnyType:$result); + let arguments = (ins Arg<BinOpKind, "binop kind">:$kind, + CIR_AnyType:$lhs, CIR_AnyType:$rhs, + UnitAttr:$no_unsigned_wrap, + UnitAttr:$no_signed_wrap, + UnitAttr:$saturated); + + let assemblyFormat = [{ + `(` $kind `,` $lhs `,` $rhs `)` + (`nsw` $no_signed_wrap^)? + (`nuw` $no_unsigned_wrap^)? + (`sat` $saturated^)? + `:` type($lhs) attr-dict + }]; + + let hasVerifier = 1; +} + + +//===----------------------------------------------------------------------===// +// BinOpOverflowOp +//===----------------------------------------------------------------------===// + +def BinOpOverflowKind : I32EnumAttr< + "BinOpOverflowKind", + "checked binary arithmetic operation kind", + [BinOpKind_Add, BinOpKind_Sub, BinOpKind_Mul]> { + let cppNamespace = "::cir"; +} + +def BinOpOverflowOp : CIR_Op<"binop.overflow", [Pure, SameTypeOperands]> { + let summary = "Perform binary integral arithmetic with overflow checking"; + let description = [{ + `cir.binop.overflow` performs binary arithmetic operations with overflow + checking on integral operands. + + The `kind` argument specifies the kind of arithmetic operation to perform. + It can be either `add`, `sub`, or `mul`. The `lhs` and `rhs` arguments + specify the input operands of the arithmetic operation. The types of `lhs` + and `rhs` must be the same. + + `cir.binop.overflow` produces two SSA values. 
`result` is the result of the + arithmetic operation truncated to its specified type. `overflow` is a + boolean value indicating whether overflow happens during the operation. + + The exact semantic of this operation is as follows: + + - `lhs` and `rhs` are promoted to an imaginary integral type that has + infinite precision. + - The arithmetic operation is performed on the promoted operands. + - The infinite-precision result is truncated to the type of `result`. The + truncated result is assigned to `result`. + - If the truncated result is equal to the un-truncated result, `overflow` + is assigned to false. Otherwise, `overflow` is assigned to true. + }]; + + let arguments = (ins Arg<BinOpOverflowKind, "arithmetic kind">:$kind, + CIR_IntType:$lhs, CIR_IntType:$rhs); + let results = (outs CIR_IntType:$result, CIR_BoolType:$overflow); + + let assemblyFormat = [{ + `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) `,` + `(` type($result) `,` type($overflow) `)` + attr-dict + }]; + + let builders = [ + OpBuilder<(ins "cir::IntType":$resultTy, + "cir::BinOpOverflowKind":$kind, + "mlir::Value":$lhs, + "mlir::Value":$rhs), [{ + auto overflowTy = cir::BoolType::get($_builder.getContext()); + build($_builder, $_state, resultTy, overflowTy, kind, lhs, rhs); + }]> + ]; +} + //===----------------------------------------------------------------------===// // GlobalOp //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 5d1eb17e146d0..7b0fcbc7cc98f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -21,6 +21,7 @@ namespace cir { bool isAnyFloatingPointType(mlir::Type t); +bool isFPOrFPVectorTy(mlir::Type); } // namespace cir diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 3e33e5dc60194..3654038a51fbd 100644 --- 
a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -79,6 +79,9 @@ struct MissingFeatures { static bool opUnarySignedOverflow() { return false; } static bool opUnaryPromotionType() { return false; } + // Clang early optimizations or things defered to LLVM lowering. + static bool mayHaveIntegerOverflow() { return false; } + // Misc static bool cxxABI() { return false; } static bool tryEmitAsConstant() { return false; } @@ -93,16 +96,20 @@ struct MissingFeatures { static bool stackSaveOp() { return false; } static bool aggValueSlot() { return false; } static bool generateDebugInfo() { return false; } + static bool getFPFeaturesInEffect() { return false; } + static bool pointerOverflowSanitizer() { return false; } static bool fpConstraints() { return false; } static bool sanitizers() { return false; } static bool addHeapAllocSiteMetadata() { return false; } static bool targetCodeGenInfoGetNullPointer() { return false; } - static bool CGFPOptionsRAII() { return false; } static bool loopInfoStack() { return false; } static bool requiresCleanups() { return false; } static bool createProfileWeightsForLoop() { return false; } static bool emitCondLikelihoodViaExpectIntrinsic() { return false; } static bool pgoUse() { return false; } + static bool cgFPOptionsRAII() { return false; } + static bool metaDataNode() { return false; } + static bool foldBinOpFMF() { return false; } // Missing types static bool dataMemberType() { return false; } @@ -111,6 +118,8 @@ struct MissingFeatures { static bool scalableVectors() { return false; } static bool unsizedTypes() { return false; } static bool vectorType() { return false; } + static bool complexType() { return false; } + static bool fixedPointType() { return false; } // Future CIR operations static bool awaitOp() { return false; } @@ -127,6 +136,8 @@ struct MissingFeatures { static bool ternaryOp() { return false; } static bool tryOp() { return false; } static bool zextOp() { return false; } 
+ static bool opPtrStride() { return false; } + static bool opPtrDiff() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index fef290612149a..dfffc2639b0d2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -14,11 +14,13 @@ #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/MissingFeatures.h" +#include "llvm/ADT/STLExtras.h" namespace clang::CIRGen { class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { const CIRGenTypeCache &typeCache; + bool isFPConstrained = false; public: CIRGenBuilderTy(mlir::MLIRContext &mlirContext, const CIRGenTypeCache &tc) @@ -72,15 +74,72 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { if (const auto arrayVal = mlir::dyn_cast<cir::ConstArrayAttr>(attr)) { if (mlir::isa<mlir::StringAttr>(arrayVal.getElts())) return false; - for (const auto elt : mlir::cast<mlir::ArrayAttr>(arrayVal.getElts())) { - if (!isNullValue(elt)) - return false; - } - return true; + + return llvm::all_of( + mlir::cast<mlir::ArrayAttr>(arrayVal.getElts()), + [&](const mlir::Attribute &elt) { return isNullValue(elt); }); } return false; } + // + // Type helpers + // ------------ + // + cir::IntType getUIntNTy(int n) { + switch (n) { + case 8: + return getUInt8Ty(); + case 16: + return getUInt16Ty(); + case 32: + return getUInt32Ty(); + case 64: + return getUInt64Ty(); + default: + return cir::IntType::get(getContext(), n, false); + } + } + + cir::IntType getSIntNTy(int n) { + switch (n) { + case 8: + return getSInt8Ty(); + case 16: + return getSInt16Ty(); + case 32: + return getSInt32Ty(); + case 64: + return getSInt64Ty(); + default: + return cir::IntType::get(getContext(), n, true); + } + } + + cir::VoidType getVoidTy() { return typeCache.VoidTy; } + + cir::IntType getSInt8Ty() { return typeCache.SInt8Ty; } + cir::IntType getSInt16Ty() { return typeCache.SInt16Ty; } + cir::IntType getSInt32Ty() { return 
typeCache.SInt32Ty; } + cir::IntType getSInt64Ty() { return typeCache.SInt64Ty; } + + cir::IntType getUInt8Ty() { return typeCache.UInt8Ty; } + cir::IntType getUInt16Ty() { return typeCache.UInt16Ty; } + cir::IntType getUInt32Ty() { return typeCache.UInt32Ty; } + cir::IntType getUInt64Ty() { return typeCache.UInt64Ty; } + + bool isInt8Ty(mlir::Type i) { + return i == typeCache.UInt8Ty || i == typeCache.SInt8Ty; + } + bool isInt16Ty(mlir::Type i) { + return i == typeCache.UInt16Ty || i == typeCache.SInt16Ty; + } + bool isInt32Ty(mlir::Type i) { + return i == typeCache.UInt32Ty || i == typeCache.SInt32Ty; + } + bool isInt64Ty(mlir::Type i) { + return i == typeCache.UInt64Ty || i == typeCache.SInt64Ty; + } bool isInt(mlir::Type i) { return mlir::isa<cir::IntType>(i); } // Creates constant nullptr for pointer type ty. @@ -88,6 +147,53 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { assert(!cir::MissingFeatures::targetCodeGenInfoGetNullPointer()); return create<cir::ConstantOp>(loc, ty, getConstPtrAttr(ty, 0)); } + + mlir::Value createNeg(mlir::Value value) { + + if (auto intTy = mlir::dyn_cast<cir::IntType>(value.getType())) { + // Source is a unsigned integer: first cast it to signed. 
+ if (intTy.isUnsigned()) + value = createIntCast(value, getSIntNTy(intTy.getWidth())); + return create<cir::UnaryOp>(value.getLoc(), value.getType(), + cir::UnaryOpKind::Minus, value); + } + + llvm_unreachable("negation for the given type is NYI"); + } + + mlir::Value createFSub(mlir::Value lhs, mlir::Value rhs) { + assert(!cir::MissingFeatures::metaDataNode()); + if (isFPConstrained) + llvm_unreachable("Constrained FP NYI"); + + assert(!cir::MissingFeatures::foldBinOpFMF()); + return create<cir::BinOp>(lhs.getLoc(), cir::BinOpKind::Sub, lhs, rhs); + } + + mlir::Value createFAdd(mlir::Value lhs, mlir::Value rhs) { + assert(!cir::MissingFeatures::metaDataNode()); + if (isFPConstrained) + llvm_unreachable("Constrained FP NYI"); + + assert(!cir::MissingFeatures::foldBinOpFMF()); + return create<cir::BinOp>(lhs.getLoc(), cir::BinOpKind::Add, lhs, rhs); + } + mlir::Value createFMul(mlir::Value lhs, mlir::Value rhs) { + assert(!cir::MissingFeatures::metaDataNode()); + if (isFPConstrained) + llvm_unreachable("Constrained FP NYI"); + + assert(!cir::MissingFeatures::foldBinOpFMF()); + return create<cir::BinOp>(lhs.getLoc(), cir::BinOpKind::Mul, lhs, rhs); + } + mlir::Value createFDiv(mlir::Value lhs, mlir::Value rhs) { + assert(!cir::MissingFeatures::metaDataNode()); + if (isFPConstrained) + llvm_unreachable("Constrained FP NYI"); + + assert(!cir::MissingFeatures::foldBinOpFMF()); + return create<cir::BinOp>(lhs.getLoc(), cir::BinOpKind::Div, lhs, rhs); + } }; } // namespace clang::CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 306130b80d457..d3365cbcbbeed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -149,8 +149,8 @@ LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) { Address addr = Address::invalid(); // The variable should generally be present in the local decl map. 
- auto iter = LocalDeclMap.find(vd); - if (iter != LocalDeclMap.end()) { + auto iter = localDeclMap.find(vd); + if (iter != localDeclMap.end()) { addr = iter->second; } else { // Otherwise, it might be static local we haven't emitted yet for some @@ -176,7 +176,7 @@ mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *e) { return createDummyValue(getLoc(loc), boolTy); } - assert(!cir::MissingFeatures::CGFPOptionsRAII()); + assert(!cir::MissingFeatures::cgFPOptionsRAII()); if (!e->getType()->isAnyComplexType()) return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc); @@ -211,9 +211,8 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) { if (e->getType()->isAnyComplexType()) { cgm.errorNYI(e->getSourceRange(), "UnaryOp complex inc/dec"); return LValue(); - } else { - emitScalarPrePostIncDec(e, lv, isInc, /*isPre=*/true); } + emitScalarPrePostIncDec(e, lv, isInc, /*isPre=*/true); return lv; } @@ -232,6 +231,61 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) { llvm_unreachable("Unknown unary operator kind!"); } +LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) { + // Comma expressions just emit their LHS then their RHS as an l-value. + if (e->getOpcode() == BO_Comma) { + emitIgnoredExpr(e->getLHS()); + return emitLValue(e->getRHS()); + } + + if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) { + cgm.errorNYI(e->getSourceRange(), "member pointers"); + return {}; + } + + assert(e->getOpcode() == BO_Assign && "unexpected binary l-value"); + + // Note that in all of these cases, __block variables need the RHS + // evaluated first just in case the variable gets moved by the RHS. 
+ + switch (CIRGenFunction::getEvaluationKind(e->getType())) { + case cir::TEK_Scalar: { + if (e->getLHS()->getType().getObjCLifetime() != + clang::Qualifiers::ObjCLifetime::OCL_None) { + cgm.errorNYI(e->getSourceRange(), "objc lifetimes"); + return {}; + } + + RValue rv = emitAnyExpr(e->getRHS()); + LValue lv = emitLValue(e->getLHS()); + + SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())}; + if (lv.isBitField()) { + cgm.errorNYI(e->getSourceRange(), "bitfields"); + return {}; + } + emitStoreThroughLValue(rv, lv); + + if (getLangOpts().OpenMP) { + cgm.errorNYI(e->getSourceRange(), "openmp"); + return {}; + } + + return lv; + } + + case cir::TEK_Complex: { + assert(!cir::MissingFeatures::complexType()); + cgm.errorNYI(e->getSourceRange(), "complex l-values"); + return {}; + } + case cir::TEK_Aggregate: + cgm.errorNYI(e->getSourceRange(), "aggregate lvalues"); + return {}; + } + llvm_unreachable("bad evaluation kind"); +} + /// Emit code to compute the specified expression which /// can have any type. The result is returned as an RValue struct. RValue CIRGenFunction::emitAnyExpr(const Expr *e) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index ca0090f8d35b3..787a93c52b53e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -26,6 +26,53 @@ using namespace clang::CIRGen; namespace { +struct BinOpInfo { + mlir::Value lhs; + mlir::Value rhs; + SourceRange loc; + QualType fullType; // Type of operands and result + QualType compType; // Type used for computations. Element type + // for vectors, otherwise same as FullType. + BinaryOperator::Opcode opcode; // Opcode of BinOp to perform + FPOptions fpfeatures; + const Expr *e; // Entire expr, for error unsupported. May not be binop. + + /// Check if the binop computes a division or a remainder. 
+ bool isDivremOp() const { + return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign || + opcode == BO_RemAssign; + } + + /// Check if the binop can result in integer overflow. + bool mayHaveIntegerOverflow() const { + // Without constant input, we can't rule out overflow. + auto lhsci = dyn_cast<cir::ConstantOp>(lhs.getDefiningOp()); + auto rhsci = dyn_cast<cir::ConstantOp>(rhs.getDefiningOp()); + if (!lhsci || !rhsci) + return true; + + assert(!cir::MissingFeatures::mayHaveIntegerOverflow()); + // TODO(cir): For now we just assume that we might overflow + return true; + } + + /// Check if at least one operand is a fixed point type. In such cases, + /// this operation did not follow usual arithmetic conversion and both + /// operands might not be of the same type. + bool isFixedPointOp() const { + // We cannot simply check the result type since comparison operations + // return an int. + if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) { + QualType lhstype = binOp->getLHS()->getType(); + QualType rhstype = binOp->getRHS()->getType(); + return lhstype->isFixedPointType() || rhstype->isFixedPointType(); + } + if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e)) + return unop->getSubExpr()->getType()->isFixedPointType(); + return false; + } +}; + class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> { CIRGenFunction &cgf; CIRGenBuilderTy &builder; @@ -35,6 +82,22 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> { ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder) : cgf(cgf), builder(builder) {} + //===--------------------------------------------------------------------===// + // Utilities + //===--------------------------------------------------------------------===// + + mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) { + cgf.cgm.errorNYI(result.getLoc(), "floating cast for promoted value"); + return nullptr; + } + + mlir::Value 
emitUnPromotedValue(mlir::Value result, QualType exprType) { + cgf.cgm.errorNYI(result.getLoc(), "floating cast for unpromoted value"); + return nullptr; + } + + mlir::Value emitPromoted(const Expr *e, QualType promotionType); + //===--------------------------------------------------------------------===// // Visitor Methods //===--------------------------------------------------------------------===// @@ -60,6 +123,10 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> { return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getScalarVal(); } + mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) { + return cgf.emitLoadOfLValue(lv, loc).getScalarVal(); + } + // l-values mlir::Value VisitDeclRefExpr(DeclRefExpr *e) { assert(!cir::MissingFeatures::tryEmitAsConstant()); @@ -308,14 +375,14 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> { // NOTE(CIR): clang calls CreateAdd but folds this to a unary op value = emitUnaryOp(e, kind, input); } - } else if (const PointerType *ptr = type->getAs<PointerType>()) { + } else if (isa<PointerType>(type)) { cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec pointer"); return {}; } else if (type->isVectorType()) { cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector"); return {}; } else if (type->isRealFloatingType()) { - assert(!cir::MissingFeatures::CGFPOptionsRAII()); + assert(!cir::MissingFeatures::cgFPOptionsRAII()); if (type->isHalfType() && !cgf.getContext().getLangOpts().NativeHalfType) { @@ -349,9 +416,8 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> { if (lv.isBitField()) { cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec bitfield"); return {}; - } else { - cgf.emitStoreThroughLValue(RValue::get(value), lv); } + cgf.emitStoreThroughLValue(RValue::get(value), lv); // If this is a postinc, return the value read from memory, otherwise use // the updated value. 
@@ -558,8 +624,225 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> { return res; } + + BinOpInfo emitBinOps(const BinaryOperator *e, + QualType promotionType = QualType()) { + BinOpInfo result; + result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType); + result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType); + if (!promotionType.isNull()) + result.fullType = promotionType; + else + result.fullType = e->getType(); + result.compType = result.fullType; + if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) { + result.compType = vecType->getElementType(); + } + result.opcode = e->getOpcode(); + result.loc = e->getSourceRange(); + // TODO(cir): Result.FPFeatures + assert(!cir::MissingFeatures::getFPFeaturesInEffect()); + result.e = e; + return result; + } + + mlir::Value emitMul(const BinOpInfo &ops); + mlir::Value emitDiv(const BinOpInfo &ops); + mlir::Value emitRem(const BinOpInfo &ops); + mlir::Value emitAdd(const BinOpInfo &ops); + mlir::Value emitSub(const BinOpInfo &ops); + mlir::Value emitShl(const BinOpInfo &ops); + mlir::Value emitShr(const BinOpInfo &ops); + mlir::Value emitAnd(const BinOpInfo &ops); + mlir::Value emitXor(const BinOpInfo &ops); + mlir::Value emitOr(const BinOpInfo &ops); + + LValue emitCompoundAssignLValue( + const CompoundAssignOperator *e, + mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &), + mlir::Value &result); + mlir::Value + emitCompoundAssign(const CompoundAssignOperator *e, + mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &)); + + // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM + // codegen. 
+ QualType getPromotionType(QualType ty) { + if (ty->getAs<ComplexType>()) { + assert(!cir::MissingFeatures::complexType()); + cgf.cgm.errorNYI("promotion to complex type"); + return QualType(); + } + if (ty.UseExcessPrecision(cgf.getContext())) { + if (ty->getAs<VectorType>()) { + assert(!cir::MissingFeatures::vectorType()); + cgf.cgm.errorNYI("promotion to vector type"); + return QualType(); + } + return cgf.getContext().FloatTy; + } + return QualType(); + } + +// Binary operators and binary compound assignment operators. +#define HANDLEBINOP(OP) \ + mlir::Value VisitBin##OP(const BinaryOperator *e) { \ + QualType promotionTy = getPromotionType(e->getType()); \ + auto result = emit##OP(emitBinOps(e, promotionTy)); \ + if (result && !promotionTy.isNull()) \ + result = emitUnPromotedValue(result, e->getType()); \ + return result; \ + } \ + mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) { \ + return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP); \ + } + + HANDLEBINOP(Mul) + HANDLEBINOP(Div) + HANDLEBINOP(Rem) + HANDLEBINOP(Add) + HANDLEBINOP(Sub) + HANDLEBINOP(Shl) + HANDLEBINOP(Shr) + HANDLEBINOP(And) + HANDLEBINOP(Xor) + HANDLEBINOP(Or) +#undef HANDLEBINOP }; +LValue ScalarExprEmitter::emitCompoundAssignLValue( + const CompoundAssignOperator *e, + mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &), + mlir::Value &result) { + QualType lhsTy = e->getLHS()->getType(); + BinOpInfo opInfo; + + if (e->getComputationResultType()->isAnyComplexType()) { + cgf.cgm.errorNYI(result.getLoc(), "complex lvalue assign"); + return LValue(); + } + + // Emit the RHS first. __block variables need to have the rhs evaluated + // first, plus this should improve codegen a little. 
+ + QualType promotionTypeCR = getPromotionType(e->getComputationResultType()); + if (promotionTypeCR.isNull()) + promotionTypeCR = e->getComputationResultType(); + + QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType()); + QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType()); + + if (!promotionTypeRHS.isNull()) + opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS); + else + opInfo.rhs = Visit(e->getRHS()); + + opInfo.fullType = promotionTypeCR; + opInfo.compType = opInfo.fullType; + if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType)) { + opInfo.compType = vecType->getElementType(); + } + opInfo.opcode = e->getOpcode(); + opInfo.fpfeatures = e->getFPFeaturesInEffect(cgf.getLangOpts()); + opInfo.e = e; + opInfo.loc = e->getSourceRange(); + + // Load/convert the LHS + LValue lhsLV = cgf.emitLValue(e->getLHS()); + + if (lhsTy->getAs<AtomicType>()) { + cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign"); + return LValue(); + } + + opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc()); + + CIRGenFunction::SourceLocRAIIObject sourceloc{ + cgf, cgf.getLoc(e->getSourceRange())}; + SourceLocation loc = e->getExprLoc(); + if (!promotionTypeLHS.isNull()) + opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, + e->getExprLoc()); + else + opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, + e->getComputationLHSType(), loc); + + // Expand the binary operator. + result = (this->*func)(opInfo); + + // Convert the result back to the LHS type, + // potentially with Implicit Conversion sanitizer check. + result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc); + // ScalarConversionOpts(cgf.sanOpts)); + + // Store the result value into the LHS lvalue. Bit-fields are handled + // specially because the result is altered by the store, i.e., [C99 6.5.16p1] + // 'An assignment expression has the value of the left operand after the + // assignment...'. 
+ if (lhsLV.isBitField()) + cgf.cgm.errorNYI(e->getSourceRange(), "store through bitfield lvalue"); + else + cgf.emitStoreThroughLValue(RValue::get(result), lhsLV); + + if (cgf.getLangOpts().OpenMP) + cgf.cgm.errorNYI(e->getSourceRange(), "openmp"); + + return lhsLV; +} + +mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e, + QualType promotionType) { + e = e->IgnoreParens(); + if (const auto *bo = dyn_cast<BinaryOperator>(e)) { + switch (bo->getOpcode()) { +#define HANDLE_BINOP(OP) \ + case BO_##OP: \ + return emit##OP(emitBinOps(bo, promotionType)); + HANDLE_BINOP(Add) + HANDLE_BINOP(Sub) + HANDLE_BINOP(Mul) + HANDLE_BINOP(Div) +#undef HANDLE_BINOP + default: + break; + } + } else if (isa<UnaryOperator>(e)) { + cgf.cgm.errorNYI(e->getSourceRange(), "unary operators"); + return {}; + } + mlir::Value result = Visit(const_cast<Expr *>(e)); + if (result) { + if (!promotionType.isNull()) + return emitPromotedValue(result, promotionType); + return emitUnPromotedValue(result, e->getType()); + } + return result; +} + +mlir::Value ScalarExprEmitter::emitCompoundAssign( + const CompoundAssignOperator *e, + mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) { + + bool ignore = std::exchange(ignoreResultAssign, false); + mlir::Value rhs; + LValue lhs = emitCompoundAssignLValue(e, func, rhs); + + // If the result is clearly ignored, return now. + if (ignore) + return {}; + + // The result of an assignment in C is the assigned r-value. + if (!cgf.getLangOpts().CPlusPlus) + return rhs; + + // If the lvalue is non-volatile, return the computed value of the assignment. + if (!lhs.isVolatile()) + return rhs; + + // Otherwise, reload the value. + return emitLoadOfLValue(lhs, e->getExprLoc()); +} + } // namespace /// Emit the computation of the specified expression of scalar type. 
@@ -570,13 +853,425 @@ mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e) { return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e)); } -[[maybe_unused]] static bool MustVisitNullValue(const Expr *e) { +mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *e, + QualType promotionType) { + if (!promotionType.isNull()) + return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType); + return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e)); +} + +[[maybe_unused]] static bool mustVisitNullValue(const Expr *e) { // If a null pointer expression's type is the C++0x nullptr_t, then // it's not necessarily a simple constant and it must be evaluated // for its potential side effects. return e->getType()->isNullPtrType(); } +/// If \p E is a widened promoted integer, get its base (unpromoted) type. +static std::optional<QualType> +getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) { + const Expr *base = e->IgnoreImpCasts(); + if (e == base) + return std::nullopt; + + QualType baseTy = base->getType(); + if (!astContext.isPromotableIntegerType(baseTy) || + astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType())) + return std::nullopt; + + return baseTy; +} + +/// Check if \p E is a widened promoted integer. +[[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext, + const Expr *e) { + return getUnwidenedIntegerType(astContext, e).has_value(); +} + +/// Check if we can skip the overflow check for \p Op. +[[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext, + const BinOpInfo &op) { + assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) && + "Expected a unary or binary operator"); + + // If the binop has constant inputs and we can prove there is no overflow, + // we can elide the overflow check. + if (!op.mayHaveIntegerOverflow()) + return true; + + // If a unary op has a widened operand, the op cannot overflow. 
+ if (const auto *uo = dyn_cast<UnaryOperator>(op.e)) + return !uo->canOverflow(); + + // We usually don't need overflow checks for binops with widened operands. + // Multiplication with promoted unsigned operands is a special case. + const auto *bo = cast<BinaryOperator>(op.e); + auto optionalLHSTy = getUnwidenedIntegerType(astContext, bo->getLHS()); + if (!optionalLHSTy) + return false; + + auto optionalRHSTy = getUnwidenedIntegerType(astContext, bo->getRHS()); + if (!optionalRHSTy) + return false; + + QualType lhsTy = *optionalLHSTy; + QualType rhsTy = *optionalRHSTy; + + // This is the simple case: binops without unsigned multiplication, and with + // widened operands. No overflow check is needed here. + if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) || + !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType()) + return true; + + // For unsigned multiplication the overflow check can be elided if either one + // of the unpromoted types are less than half the size of the promoted type. + unsigned promotedSize = astContext.getTypeSize(op.e->getType()); + return (2 * astContext.getTypeSize(lhsTy)) < promotedSize || + (2 * astContext.getTypeSize(rhsTy)) < promotedSize; +} + +/// Emit pointer + index arithmetic. +static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, + const BinOpInfo &op, + bool isSubtraction) { + // Must have binary (not unary) expr here. Unary pointer + // increment/decrement doesn't use this path. + const BinaryOperator *expr = cast<BinaryOperator>(op.e); + + mlir::Value pointer = op.lhs; + Expr *pointerOperand = expr->getLHS(); + mlir::Value index = op.rhs; + Expr *indexOperand = expr->getRHS(); + + // In a subtraction, the LHS is always the pointer. 
+ if (!isSubtraction && !mlir::isa<cir::PointerType>(pointer.getType())) { + std::swap(pointer, index); + std::swap(pointerOperand, indexOperand); + } + + bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); + + // Some versions of glibc and gcc use idioms (particularly in their malloc + // routines) that add a pointer-sized integer (known to be a pointer value) + // to a null pointer in order to cast the value back to an integer or as + // part of a pointer alignment algorithm. This is undefined behavior, but + // we'd like to be able to compile programs that use it. + // + // Normally, we'd generate a GEP with a null-pointer base here in response + // to that code, but it's also UB to dereference a pointer created that + // way. Instead (as an acknowledged hack to tolerate the idiom) we will + // generate a direct cast of the integer value to a pointer. + // + // The idiom (p = nullptr + N) is not met if any of the following are true: + // + // The operation is subtraction. + // The index is not pointer-sized. + // The pointer type is not byte-sized. + // + if (BinaryOperator::isNullPointerArithmeticExtension( + cgf.getContext(), op.opcode, expr->getLHS(), expr->getRHS())) + return cgf.getBuilder().createIntToPtr(index, pointer.getType()); + + // Differently from LLVM codegen, ABI bits for index sizes is handled during + // LLVM lowering. + + // If this is subtraction, negate the index. + if (isSubtraction) + index = cgf.getBuilder().createNeg(index); + + if (cgf.sanOpts.has(SanitizerKind::ArrayBounds)) + cgf.cgm.errorNYI("array bounds sanitizer"); + + const PointerType *pointerType = + pointerOperand->getType()->getAs<PointerType>(); + if (!pointerType) + cgf.cgm.errorNYI("ObjC"); + + QualType elementType = pointerType->getPointeeType(); + if (const VariableArrayType *vla = + cgf.getContext().getAsVariableArrayType(elementType)) { + + // The element count here is the total number of non-VLA elements. 
+ mlir::Value numElements = nullptr; // cgf.getVLASize(vla).NumElts; + + // GEP indexes are signed, and scaling an index isn't permitted to + // signed-overflow, so we use the same semantics for our explicit + // multiply. We suppress this if overflow is not undefined behavior. + mlir::Type elemTy = cgf.convertTypeForMem(vla->getElementType()); + + index = cgf.getBuilder().createCast(cir::CastKind::integral, index, + numElements.getType()); + index = cgf.getBuilder().createMul(index, numElements); + + if (cgf.getLangOpts().isSignedOverflowDefined()) { + assert(!cir::MissingFeatures::opPtrStride()); + cgf.cgm.errorNYI("pointer stride"); + } else { + pointer = cgf.emitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, + isSubtraction, op.e->getExprLoc()); + } + + return pointer; + } + // Explicitly handle GNU void* and function pointer arithmetic extensions. The + // GNU void* casts amount to no-ops since our void* type is i8*, but this is + // future proof. + mlir::Type elemTy; + if (elementType->isVoidType() || elementType->isFunctionType()) + elemTy = cgf.UInt8Ty; + else + elemTy = cgf.convertTypeForMem(elementType); + + if (cgf.getLangOpts().isSignedOverflowDefined()) { + assert(!cir::MissingFeatures::opPtrStride()); + cgf.cgm.errorNYI("pointer stride"); + return pointer; + } + + return cgf.emitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, + isSubtraction, op.e->getExprLoc()); +} + +mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) { + if (ops.compType->isSignedIntegerOrEnumerationType()) { + switch (cgf.getLangOpts().getSignedOverflowBehavior()) { + case LangOptions::SOB_Defined: + if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return builder.createMul(ops.lhs, ops.rhs); + [[fallthrough]]; + case LangOptions::SOB_Undefined: + if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return builder.createNSWMul(ops.lhs, ops.rhs); + [[fallthrough]]; + case LangOptions::SOB_Trapping: + if 
(canElideOverflowCheck(cgf.getContext(), ops)) + return builder.createNSWMul(ops.lhs, ops.rhs); + cgf.cgm.errorNYI("sanitizers"); + } + } + if (ops.fullType->isConstantMatrixType()) { + assert(!cir::MissingFeatures::matrixType()); + cgf.cgm.errorNYI("matrix types"); + return nullptr; + } + if (ops.compType->isUnsignedIntegerType() && + cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && + !canElideOverflowCheck(cgf.getContext(), ops)) + cgf.cgm.errorNYI("unsigned int overflow sanitizer"); + + if (cir::isFPOrFPVectorTy(ops.lhs.getType())) { + assert(!cir::MissingFeatures::cgFPOptionsRAII()); + return builder.createFMul(ops.lhs, ops.rhs); + } + + if (ops.isFixedPointOp()) { + assert(!cir::MissingFeatures::fixedPointType()); + cgf.cgm.errorNYI("fixed point"); + return nullptr; + } + + return builder.create<cir::BinOp>(cgf.getLoc(ops.loc), + cgf.convertType(ops.fullType), + cir::BinOpKind::Mul, ops.lhs, ops.rhs); +} +mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) { + return builder.create<cir::BinOp>(cgf.getLoc(ops.loc), + cgf.convertType(ops.fullType), + cir::BinOpKind::Div, ops.lhs, ops.rhs); +} +mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) { + return builder.create<cir::BinOp>(cgf.getLoc(ops.loc), + cgf.convertType(ops.fullType), + cir::BinOpKind::Rem, ops.lhs, ops.rhs); +} + +mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) { + if (mlir::isa<cir::PointerType>(ops.lhs.getType()) || + mlir::isa<cir::PointerType>(ops.rhs.getType())) + return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false); + if (ops.compType->isSignedIntegerOrEnumerationType()) { + switch (cgf.getLangOpts().getSignedOverflowBehavior()) { + case LangOptions::SOB_Defined: + if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return builder.createAdd(ops.lhs, ops.rhs); + [[fallthrough]]; + case LangOptions::SOB_Undefined: + if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return builder.createNSWAdd(ops.lhs, ops.rhs); + 
[[fallthrough]]; + case LangOptions::SOB_Trapping: + if (canElideOverflowCheck(cgf.getContext(), ops)) + return builder.createNSWAdd(ops.lhs, ops.rhs); + cgf.cgm.errorNYI("sanitizers"); + } + } + if (ops.fullType->isConstantMatrixType()) { + assert(!cir::MissingFeatures::matrixType()); + cgf.cgm.errorNYI("matrix types"); + return nullptr; + } + + if (ops.compType->isUnsignedIntegerType() && + cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && + !canElideOverflowCheck(cgf.getContext(), ops)) + cgf.cgm.errorNYI("unsigned int overflow sanitizer"); + + if (cir::isFPOrFPVectorTy(ops.lhs.getType())) { + assert(!cir::MissingFeatures::cgFPOptionsRAII()); + return builder.createFAdd(ops.lhs, ops.rhs); + } + + if (ops.isFixedPointOp()) { + assert(!cir::MissingFeatures::fixedPointType()); + cgf.cgm.errorNYI("fixed point"); + return {}; + } + + return builder.create<cir::BinOp>(cgf.getLoc(ops.loc), + cgf.convertType(ops.fullType), + cir::BinOpKind::Add, ops.lhs, ops.rhs); +} + +mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) { + // The LHS is always a pointer if either side is. 
+ if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) { + if (ops.compType->isSignedIntegerOrEnumerationType()) { + switch (cgf.getLangOpts().getSignedOverflowBehavior()) { + case LangOptions::SOB_Defined: { + if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return builder.createSub(ops.lhs, ops.rhs); + [[fallthrough]]; + } + case LangOptions::SOB_Undefined: + if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return builder.createNSWSub(ops.lhs, ops.rhs); + [[fallthrough]]; + case LangOptions::SOB_Trapping: + if (canElideOverflowCheck(cgf.getContext(), ops)) + return builder.createNSWSub(ops.lhs, ops.rhs); + cgf.cgm.errorNYI("sanitizers"); + } + } + + if (ops.fullType->isConstantMatrixType()) { + assert(!cir::MissingFeatures::matrixType()); + cgf.cgm.errorNYI("matrix types"); + return nullptr; + } + + if (ops.compType->isUnsignedIntegerType() && + cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && + !canElideOverflowCheck(cgf.getContext(), ops)) + cgf.cgm.errorNYI("unsigned int overflow sanitizer"); + + if (cir::isFPOrFPVectorTy(ops.lhs.getType())) { + assert(!cir::MissingFeatures::cgFPOptionsRAII()); + return builder.createFSub(ops.lhs, ops.rhs); + } + + if (ops.isFixedPointOp()) { + assert(!cir::MissingFeatures::fixedPointType()); + cgf.cgm.errorNYI("fixed point"); + return {}; + } + + return builder.create<cir::BinOp>(cgf.getLoc(ops.loc), + cgf.convertType(ops.fullType), + cir::BinOpKind::Sub, ops.lhs, ops.rhs); + } + + // If the RHS is not a pointer, then we have normal pointer + // arithmetic. + if (!mlir::isa<cir::PointerType>(ops.rhs.getType())) + return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true); + + // Otherwise, this is a pointer subtraction + + // Do the raw subtraction part. + // + // TODO(cir): note for LLVM lowering out of this; when expanding this into + // LLVM we shall take VLA's, division by element size, etc. + // + // See more in `EmitSub` in CGExprScalar.cpp. 
+ assert(!cir::MissingFeatures::opPtrDiff()); + cgf.cgm.errorNYI("ptrdiff"); + return {}; +} + +mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) { + // TODO: This misses out on the sanitizer check below. + if (ops.isFixedPointOp()) { + assert(cir::MissingFeatures::fixedPointType()); + cgf.cgm.errorNYI("fixed point"); + return {}; + } + + // CIR accepts shift between different types, meaning nothing special + // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type: + // promote or truncate the RHS to the same size as the LHS. + + bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) && + ops.compType->hasSignedIntegerRepresentation() && + !cgf.getLangOpts().isSignedOverflowDefined() && + !cgf.getLangOpts().CPlusPlus20; + bool sanitizeUnsignedBase = + cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) && + ops.compType->hasUnsignedIntegerRepresentation(); + bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase; + bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent); + + // OpenCL 6.3j: shift values are effectively % word size of LHS. + if (cgf.getLangOpts().OpenCL) + cgf.cgm.errorNYI("opencl"); + else if ((sanitizeBase || sanitizeExponent) && + mlir::isa<cir::IntType>(ops.lhs.getType())) + cgf.cgm.errorNYI("sanitizers"); + + cgf.cgm.errorNYI("shift ops"); + return {}; +} + +mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) { + // TODO: This misses out on the sanitizer check below. + if (ops.isFixedPointOp()) { + assert(cir::MissingFeatures::fixedPointType()); + cgf.cgm.errorNYI("fixed point"); + return {}; + } + + // CIR accepts shift between different types, meaning nothing special + // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type: + // promote or truncate the RHS to the same size as the LHS. + + // OpenCL 6.3j: shift values are effectively % word size of LHS. 
+ if (cgf.getLangOpts().OpenCL) + cgf.cgm.errorNYI("opencl"); + else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) && + mlir::isa<cir::IntType>(ops.lhs.getType())) + cgf.cgm.errorNYI("sanitizers"); + + // Note that we don't need to distinguish unsigned treatment at this + // point since it will be handled later by LLVM lowering. + cgf.cgm.errorNYI("shift ops"); + return {}; +} + +mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) { + return builder.create<cir::BinOp>(cgf.getLoc(ops.loc), + cgf.convertType(ops.fullType), + cir::BinOpKind::And, ops.lhs, ops.rhs); +} +mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) { + return builder.create<cir::BinOp>(cgf.getLoc(ops.loc), + cgf.convertType(ops.fullType), + cir::BinOpKind::Xor, ops.lhs, ops.rhs); +} +mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) { + return builder.create<cir::BinOp>(cgf.getLoc(ops.loc), + cgf.convertType(ops.fullType), + cir::BinOpKind::Or, ops.lhs, ops.rhs); +} + // Emit code for an explicit or implicit cast. 
Implicit // casts have to handle a more broad range of conversions than explicit // casts, as they handle things like function to ptr-to-function decay @@ -661,7 +1356,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) { } case CK_NullToPointer: { - if (MustVisitNullValue(subExpr)) + if (mustVisitNullValue(subExpr)) cgf.emitIgnoredExpr(subExpr); // Note that DestTy is used as the MLIR type instead of a custom @@ -790,9 +1485,26 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( cgf.cgm.UInt64Ty, e->EvaluateKnownConstInt(cgf.getContext()))); } -mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *E, - LValue LV, bool isInc, +mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e, + LValue lv, bool isInc, bool isPre) { return ScalarExprEmitter(*this, builder) - .emitScalarPrePostIncDec(E, LV, isInc, isPre); + .emitScalarPrePostIncDec(e, lv, isInc, isPre); +} + +mlir::Value CIRGenFunction::emitCheckedInBoundsGEP( + mlir::Type elemTy, mlir::Value ptr, ArrayRef<mlir::Value> idxList, + bool signedIndices, bool isSubtraction, SourceLocation loc) { + assert(idxList.size() == 1 && "multi-index ptr arithmetic NYI"); + assert(!cir::MissingFeatures::opPtrStride()); + mlir::Value gepVal = nullptr; + + // If the pointer overflow sanitizer isn't enabled, do nothing. 
+ if (!sanOpts.has(SanitizerKind::PointerOverflow)) + return gepVal; + return ptr; + + assert(!cir::MissingFeatures::pointerOverflowSanitizer()); + cgm.errorNYI("pointer overflow sanitizer"); + return nullptr; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 16547f2401292..4ba3d416007f2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -444,6 +444,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) { return LValue(); case Expr::UnaryOperatorClass: return emitUnaryOpLValue(cast<UnaryOperator>(e)); + case Expr::BinaryOperatorClass: + return emitBinaryOperatorLValue(cast<BinaryOperator>(e)); case Expr::DeclRefExprClass: return emitDeclRefLValue(cast<DeclRefExpr>(e)); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 631217cf67762..aaf70603f7782 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -29,8 +29,6 @@ #include "clang/CIR/MissingFeatures.h" #include "clang/CIR/TypeEvaluationKind.h" -#include "llvm/ADT/ScopedHashTable.h" - namespace { class ScalarExprEmitter; } // namespace @@ -62,7 +60,7 @@ class CIRGenFunction : public CIRGenTypeCache { using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>; /// This keeps track of the CIR allocas or globals for local C /// declarations. - DeclMapTy LocalDeclMap; + DeclMapTy localDeclMap; clang::ASTContext &getContext() const { return cgm.getASTContext(); } @@ -80,11 +78,11 @@ class CIRGenFunction : public CIRGenTypeCache { /// this fuction. These can potentially set the return value. 
bool sawAsmBlock = false; - mlir::Type convertTypeForMem(QualType T); + mlir::Type convertTypeForMem(QualType t); - mlir::Type convertType(clang::QualType T); - mlir::Type convertType(const TypeDecl *T) { - return convertType(getContext().getTypeDeclType(T)); + mlir::Type convertType(clang::QualType t); + mlir::Type convertType(const TypeDecl *t) { + return convertType(getContext().getTypeDeclType(t)); } /// Return the cir::TypeEvaluationKind of QualType \c type. @@ -219,11 +217,22 @@ class CIRGenFunction : public CIRGenTypeCache { void emitDecl(const clang::Decl &d); + /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to + /// detect undefined behavior when the pointer overflow sanitizer is enabled. + /// \p SignedIndices indicates whether any of the GEP indices are signed. + /// \p IsSubtraction indicates whether the expression used to form the GEP + /// is a subtraction. + mlir::Value emitCheckedInBoundsGEP(mlir::Type elemTy, mlir::Value ptr, + llvm::ArrayRef<mlir::Value> idxList, + bool signedIndices, bool isSubtraction, + SourceLocation loc); + void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit = false); LValue emitDeclRefLValue(const clang::DeclRefExpr *e); LValue emitUnaryOpLValue(const clang::UnaryOperator *e); + LValue emitBinaryOperatorLValue(const BinaryOperator *e); /// Determine whether the given initializer is trivial in the sense /// that it requires no code to be generated. @@ -322,8 +331,8 @@ class CIRGenFunction : public CIRGenTypeCache { /// Set the address of a local variable. 
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) { - assert(!LocalDeclMap.count(vd) && "Decl already exists in LocalDeclMap!"); - LocalDeclMap.insert({vd, addr}); + assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!"); + localDeclMap.insert({vd, addr}); // TODO: Add symbol table support } @@ -332,6 +341,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// Emit the computation of the specified expression of scalar type. mlir::Value emitScalarExpr(const clang::Expr *e); + mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType); cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType); @@ -341,7 +351,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// Emit code for the start of a function. /// \param loc The location to be associated with the function. /// \param startLoc The location of the function body. - void startFunction(clang::GlobalDecl gd, clang::QualType retTy, + void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index ae86fefcf3657..cdcfa77b66379 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -75,8 +75,8 @@ void cir::CIRDialect::initialize() { // Check if a region's termination omission is valid and, if so, creates and // inserts the omitted terminator into the region. 
-LogicalResult ensureRegionTerm(OpAsmParser &parser, Region ®ion, - SMLoc errLoc) { +static LogicalResult ensureRegionTerm(OpAsmParser &parser, Region ®ion, + SMLoc errLoc) { Location eLoc = parser.getEncodedSourceLoc(parser.getCurrentLocation()); OpBuilder builder(parser.getBuilder().getContext()); @@ -102,7 +102,7 @@ LogicalResult ensureRegionTerm(OpAsmParser &parser, Region ®ion, } // True if the region's terminator should be omitted. -bool omitRegionTerm(mlir::Region &r) { +static bool omitRegionTerm(mlir::Region &r) { const auto singleNonEmptyBlock = r.hasOneBlock() && !r.back().empty(); const auto yieldsNothing = [&r]() { auto y = dyn_cast<cir::YieldOp>(r.back().getTerminator()); @@ -346,9 +346,9 @@ LogicalResult cir::CastOp::verify() { return emitOpError() << "requires two types differ in addrspace only"; return success(); } + default: + llvm_unreachable("Unknown CastOp kind?"); } - - llvm_unreachable("Unknown CastOp kind?"); } static bool isIntOrBoolCast(cir::CastOp op) { @@ -728,6 +728,37 @@ void cir::FuncOp::print(OpAsmPrinter &p) { // been implemented yet. 
mlir::LogicalResult cir::FuncOp::verify() { return success(); } +LogicalResult cir::BinOp::verify() { + bool noWrap = getNoUnsignedWrap() || getNoSignedWrap(); + bool saturated = getSaturated(); + + if (!isa<cir::IntType>(getType()) && noWrap) + return emitError() + << "only operations on integer values may have nsw/nuw flags"; + + bool noWrapOps = getKind() == cir::BinOpKind::Add || + getKind() == cir::BinOpKind::Sub || + getKind() == cir::BinOpKind::Mul; + + bool saturatedOps = + getKind() == cir::BinOpKind::Add || getKind() == cir::BinOpKind::Sub; + + if (noWrap && !noWrapOps) + return emitError() << "The nsw/nuw flags are applicable to opcodes: 'add', " + "'sub' and 'mul'"; + if (saturated && !saturatedOps) + return emitError() << "The saturated flag is applicable to opcodes: 'add' " + "and 'sub'"; + if (noWrap && saturated) + return emitError() << "The nsw/nuw flags and the saturated flag are " + "mutually exclusive"; + + assert(!cir::MissingFeatures::complexType()); + // TODO(cir): verify for complex binops + + return mlir::success(); +} + //===----------------------------------------------------------------------===// // UnaryOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 6291297492227..356f7f6244db8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -14,6 +14,7 @@ #include "mlir/IR/DialectImplementation.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/MissingFeatures.h" #include "llvm/ADT/TypeSwitch.h" //===----------------------------------------------------------------------===// @@ -274,6 +275,15 @@ bool cir::isAnyFloatingPointType(mlir::Type t) { cir::FP80Type, cir::BF16Type, cir::FP16Type, cir::FP128Type>(t); } +//===----------------------------------------------------------------------===// +// Floating-point and Float-point Vector type helpers 
+//===----------------------------------------------------------------------===// + +bool cir::isFPOrFPVectorTy(mlir::Type t) { + assert(!cir::MissingFeatures::vectorType()); + return isAnyFloatingPointType(t); +} + //===----------------------------------------------------------------------===// // FuncType Definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 30cbee48b4bdc..c0588ed906d95 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -32,7 +32,6 @@ #include "clang/CIR/Passes.h" #include "llvm/ADT/TypeSwitch.h" #include "llvm/IR/Module.h" -#include "llvm/Support/Error.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/TimeProfiler.h" @@ -85,12 +84,11 @@ static mlir::Value createIntCast(mlir::OpBuilder &bld, mlir::Value src, if (dstWidth > srcWidth && isSigned) return bld.create<mlir::LLVM::SExtOp>(loc, dstTy, src); - else if (dstWidth > srcWidth) + if (dstWidth > srcWidth) return bld.create<mlir::LLVM::ZExtOp>(loc, dstTy, src); - else if (dstWidth < srcWidth) + if (dstWidth < srcWidth) return bld.create<mlir::LLVM::TruncOp>(loc, dstTy, src); - else - return bld.create<mlir::LLVM::BitcastOp>(loc, dstTy, src); + return bld.create<mlir::LLVM::BitcastOp>(loc, dstTy, src); } /// Emits the value from memory as expected by its users. 
Should be called when @@ -994,6 +992,239 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite( << elementType; } +mlir::LLVM::IntegerOverflowFlags +CIRToLLVMBinOpLowering::getIntOverflowFlag(cir::BinOp op) const { + if (op.getNoUnsignedWrap()) + return mlir::LLVM::IntegerOverflowFlags::nuw; + + if (op.getNoSignedWrap()) + return mlir::LLVM::IntegerOverflowFlags::nsw; + + return mlir::LLVM::IntegerOverflowFlags::none; +} + +static bool isIntTypeUnsigned(mlir::Type type) { + // TODO: Ideally, we should only need to check cir::IntType here. + return mlir::isa<cir::IntType>(type) + ? mlir::cast<cir::IntType>(type).isUnsigned() + : mlir::cast<mlir::IntegerType>(type).isUnsigned(); +} + +mlir::LogicalResult CIRToLLVMBinOpLowering::matchAndRewrite( + cir::BinOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + if (adaptor.getLhs().getType() != adaptor.getRhs().getType()) + return op.emitError() << "inconsistent operands' types not supported yet"; + + mlir::Type type = op.getRhs().getType(); + assert(!cir::MissingFeatures::vectorType()); + if (!mlir::isa<cir::IntType, cir::BoolType, cir::CIRFPTypeInterface, + mlir::IntegerType>(type)) + return op.emitError() << "operand type not supported yet"; + + auto llvmTy = getTypeConverter()->convertType(op.getType()); + mlir::Type llvmEltTy = + mlir::isa<mlir::VectorType>(llvmTy) + ? 
mlir::cast<mlir::VectorType>(llvmTy).getElementType() + : llvmTy; + auto rhs = adaptor.getRhs(); + auto lhs = adaptor.getLhs(); + + type = elementTypeIfVector(type); + + switch (op.getKind()) { + case cir::BinOpKind::Add: + if (mlir::isa<mlir::IntegerType>(llvmEltTy)) { + if (op.getSaturated()) { + if (isIntTypeUnsigned(type)) { + rewriter.replaceOpWithNewOp<mlir::LLVM::UAddSat>(op, lhs, rhs); + break; + } + rewriter.replaceOpWithNewOp<mlir::LLVM::SAddSat>(op, lhs, rhs); + break; + } + rewriter.replaceOpWithNewOp<mlir::LLVM::AddOp>(op, llvmTy, lhs, rhs, + getIntOverflowFlag(op)); + } else + rewriter.replaceOpWithNewOp<mlir::LLVM::FAddOp>(op, lhs, rhs); + break; + case cir::BinOpKind::Sub: + if (mlir::isa<mlir::IntegerType>(llvmEltTy)) { + if (op.getSaturated()) { + if (isIntTypeUnsigned(type)) { + rewriter.replaceOpWithNewOp<mlir::LLVM::USubSat>(op, lhs, rhs); + break; + } + rewriter.replaceOpWithNewOp<mlir::LLVM::SSubSat>(op, lhs, rhs); + break; + } + rewriter.replaceOpWithNewOp<mlir::LLVM::SubOp>(op, llvmTy, lhs, rhs, + getIntOverflowFlag(op)); + } else + rewriter.replaceOpWithNewOp<mlir::LLVM::FSubOp>(op, lhs, rhs); + break; + case cir::BinOpKind::Mul: + if (mlir::isa<mlir::IntegerType>(llvmEltTy)) + rewriter.replaceOpWithNewOp<mlir::LLVM::MulOp>(op, llvmTy, lhs, rhs, + getIntOverflowFlag(op)); + else + rewriter.replaceOpWithNewOp<mlir::LLVM::FMulOp>(op, lhs, rhs); + break; + case cir::BinOpKind::Div: + if (mlir::isa<mlir::IntegerType>(llvmEltTy)) { + auto isUnsigned = isIntTypeUnsigned(type); + if (isUnsigned) + rewriter.replaceOpWithNewOp<mlir::LLVM::UDivOp>(op, lhs, rhs); + else + rewriter.replaceOpWithNewOp<mlir::LLVM::SDivOp>(op, lhs, rhs); + } else + rewriter.replaceOpWithNewOp<mlir::LLVM::FDivOp>(op, lhs, rhs); + break; + case cir::BinOpKind::Rem: + if (mlir::isa<mlir::IntegerType>(llvmEltTy)) { + auto isUnsigned = isIntTypeUnsigned(type); + if (isUnsigned) + rewriter.replaceOpWithNewOp<mlir::LLVM::URemOp>(op, lhs, rhs); + else + 
rewriter.replaceOpWithNewOp<mlir::LLVM::SRemOp>(op, lhs, rhs); + } else + rewriter.replaceOpWithNewOp<mlir::LLVM::FRemOp>(op, lhs, rhs); + break; + case cir::BinOpKind::And: + rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(op, lhs, rhs); + break; + case cir::BinOpKind::Or: + rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(op, lhs, rhs); + break; + case cir::BinOpKind::Xor: + rewriter.replaceOpWithNewOp<mlir::LLVM::XOrOp>(op, lhs, rhs); + break; + case cir::BinOpKind::Max: + if (mlir::isa<mlir::IntegerType>(llvmEltTy)) { + auto isUnsigned = isIntTypeUnsigned(type); + if (isUnsigned) + rewriter.replaceOpWithNewOp<mlir::LLVM::UMaxOp>(op, llvmTy, lhs, rhs); + else + rewriter.replaceOpWithNewOp<mlir::LLVM::SMaxOp>(op, llvmTy, lhs, rhs); + } + break; + } + + return mlir::LogicalResult::success(); +} + +mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite( + cir::BinOpOverflowOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto loc = op.getLoc(); + auto arithKind = op.getKind(); + auto operandTy = op.getLhs().getType(); + auto resultTy = op.getResult().getType(); + + auto encompassedTyInfo = computeEncompassedTypeWidth(operandTy, resultTy); + auto encompassedLLVMTy = rewriter.getIntegerType(encompassedTyInfo.width); + + auto lhs = adaptor.getLhs(); + auto rhs = adaptor.getRhs(); + if (operandTy.getWidth() < encompassedTyInfo.width) { + if (operandTy.isSigned()) { + lhs = rewriter.create<mlir::LLVM::SExtOp>(loc, encompassedLLVMTy, lhs); + rhs = rewriter.create<mlir::LLVM::SExtOp>(loc, encompassedLLVMTy, rhs); + } else { + lhs = rewriter.create<mlir::LLVM::ZExtOp>(loc, encompassedLLVMTy, lhs); + rhs = rewriter.create<mlir::LLVM::ZExtOp>(loc, encompassedLLVMTy, rhs); + } + } + + auto intrinName = getLLVMIntrinName(arithKind, encompassedTyInfo.sign, + encompassedTyInfo.width); + auto intrinNameAttr = mlir::StringAttr::get(op.getContext(), intrinName); + + auto overflowLLVMTy = rewriter.getI1Type(); + auto intrinRetTy = 
mlir::LLVM::LLVMStructType::getLiteral( + rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy}); + + auto callLLVMIntrinOp = rewriter.create<mlir::LLVM::CallIntrinsicOp>( + loc, intrinRetTy, intrinNameAttr, mlir::ValueRange{lhs, rhs}); + auto intrinRet = callLLVMIntrinOp.getResult(0); + + auto result = rewriter + .create<mlir::LLVM::ExtractValueOp>(loc, intrinRet, + ArrayRef<int64_t>{0}) + .getResult(); + auto overflow = rewriter + .create<mlir::LLVM::ExtractValueOp>(loc, intrinRet, + ArrayRef<int64_t>{1}) + .getResult(); + + if (resultTy.getWidth() < encompassedTyInfo.width) { + auto resultLLVMTy = getTypeConverter()->convertType(resultTy); + auto truncResult = + rewriter.create<mlir::LLVM::TruncOp>(loc, resultLLVMTy, result); + + // Extend the truncated result back to the encompassing type to check for + // any overflows during the truncation. + mlir::Value truncResultExt; + if (resultTy.isSigned()) + truncResultExt = rewriter.create<mlir::LLVM::SExtOp>( + loc, encompassedLLVMTy, truncResult); + else + truncResultExt = rewriter.create<mlir::LLVM::ZExtOp>( + loc, encompassedLLVMTy, truncResult); + auto truncOverflow = rewriter.create<mlir::LLVM::ICmpOp>( + loc, mlir::LLVM::ICmpPredicate::ne, truncResultExt, result); + + result = truncResult; + overflow = rewriter.create<mlir::LLVM::OrOp>(loc, overflow, truncOverflow); + } + + auto boolLLVMTy = getTypeConverter()->convertType(op.getOverflow().getType()); + if (boolLLVMTy != rewriter.getI1Type()) + overflow = rewriter.create<mlir::LLVM::ZExtOp>(loc, boolLLVMTy, overflow); + + rewriter.replaceOp(op, mlir::ValueRange{result, overflow}); + + return mlir::success(); +} + +std::string CIRToLLVMBinOpOverflowOpLowering::getLLVMIntrinName( + cir::BinOpOverflowKind opKind, bool isSigned, unsigned width) { + // The intrinsic name is `@llvm.{s|u}{opKind}.with.overflow.i{width}` + + std::string name = "llvm."; + + if (isSigned) + name.push_back('s'); + else + name.push_back('u'); + + switch (opKind) { + case 
cir::BinOpOverflowKind::Add: + name.append("add."); + break; + case cir::BinOpOverflowKind::Sub: + name.append("sub."); + break; + case cir::BinOpOverflowKind::Mul: + name.append("mul."); + break; + } + + name.append("with.overflow.i"); + name.append(std::to_string(width)); + + return name; +} + +CIRToLLVMBinOpOverflowOpLowering::EncompassedTypeInfo +CIRToLLVMBinOpOverflowOpLowering::computeEncompassedTypeWidth( + cir::IntType operandTy, cir::IntType resultTy) { + auto sign = operandTy.getIsSigned() || resultTy.getIsSigned(); + auto width = std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()), + resultTy.getWidth() + (sign && resultTy.isUnsigned())); + return {sign, width}; +} + static void prepareTypeConverter(mlir::LLVMTypeConverter &converter, mlir::DataLayout &dataLayout) { converter.addConversion([&](cir::PointerType type) -> mlir::Type { @@ -1133,6 +1364,8 @@ void ConvertCIRToLLVMPass::runOnOperation() { patterns.add< // clang-format off CIRToLLVMBrCondOpLowering, + CIRToLLVMBinOpLowering, + CIRToLLVMBinOpOverflowOpLowering, CIRToLLVMBrOpLowering, CIRToLLVMFuncOpLowering, CIRToLLVMTrapOpLowering, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index a01a9a5f4f076..9fb8babe3dd6c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -178,6 +178,39 @@ class CIRToLLVMUnaryOpLowering mlir::ConversionPatternRewriter &) const override; }; +class CIRToLLVMBinOpLowering : public mlir::OpConversionPattern<cir::BinOp> { + mlir::LLVM::IntegerOverflowFlags getIntOverflowFlag(cir::BinOp op) const; + +public: + using mlir::OpConversionPattern<cir::BinOp>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BinOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMBinOpOverflowOpLowering + : public mlir::OpConversionPattern<cir::BinOpOverflowOp> { +public: + using 
mlir::OpConversionPattern<cir::BinOpOverflowOp>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BinOpOverflowOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; + +private: + static std::string getLLVMIntrinName(cir::BinOpOverflowKind opKind, + bool isSigned, unsigned width); + + struct EncompassedTypeInfo { + bool sign; + unsigned width; + }; + + static EncompassedTypeInfo computeEncompassedTypeWidth(cir::IntType operandTy, + cir::IntType resultTy); +}; + class CIRToLLVMBrOpLowering : public mlir::OpConversionPattern<cir::BrOp> { public: using mlir::OpConversionPattern<cir::BrOp>::OpConversionPattern; diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp new file mode 100644 index 0000000000000..4c20f79600fac --- /dev/null +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -O1 -Wno-unused-value -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void b0(int a, int b) { + int x = a * b; + x = x / b; + x = x % b; + x = x + b; + x = x - b; + x = x & b; + x = x ^ b; + x = x | b; +} + +// CHECK: %{{.+}} = cir.binop(mul, %{{.+}}, %{{.+}}) nsw : !s32i +// CHECK: %{{.+}} = cir.binop(div, %{{.+}}, %{{.+}}) : !s32i +// CHECK: %{{.+}} = cir.binop(rem, %{{.+}}, %{{.+}}) : !s32i +// CHECK: %{{.+}} = cir.binop(add, %{{.+}}, %{{.+}}) nsw : !s32i +// CHECK: %{{.+}} = cir.binop(sub, %{{.+}}, %{{.+}}) nsw : !s32i +// CHECK: %{{.+}} = cir.binop(and, %{{.+}}, %{{.+}}) : !s32i +// CHECK: %{{.+}} = cir.binop(xor, %{{.+}}, %{{.+}}) : !s32i +// CHECK: %{{.+}} = cir.binop(or, %{{.+}}, %{{.+}}) : !s32i + +void testFloatingPointBinOps(float a, float b) { + a * b; + // CHECK: cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.float + a / b; + // CHECK: cir.binop(div, %{{.+}}, %{{.+}}) : !cir.float + a + b; + // CHECK: cir.binop(add, %{{.+}}, %{{.+}}) : !cir.float + a - b; + // CHECK: cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.float +} diff --git 
a/clang/test/CIR/Lowering/binop-bool.cir b/clang/test/CIR/Lowering/binop-bool.cir new file mode 100644 index 0000000000000..7267c407cc0a7 --- /dev/null +++ b/clang/test/CIR/Lowering/binop-bool.cir @@ -0,0 +1,18 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +module { + cir.func @foo() { + %0 = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["b", init] {alignment = 4 : i64} + %2 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool + %3 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool + %4 = cir.binop(or, %2, %3) : !cir.bool + // CHECK: = llvm.or {{.*}}, {{.*}} : i1 + %5 = cir.binop(xor, %2, %3) : !cir.bool + // CHECK: = llvm.xor {{.*}}, {{.*}} : i1 + %6 = cir.binop(and, %2, %3) : !cir.bool + // CHECK: = llvm.and {{.*}}, {{.*}} : i1 + cir.return + } +} diff --git a/clang/test/CIR/Lowering/binop-fp.cir b/clang/test/CIR/Lowering/binop-fp.cir new file mode 100644 index 0000000000000..e69a69e6b0991 --- /dev/null +++ b/clang/test/CIR/Lowering/binop-fp.cir @@ -0,0 +1,68 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + %0 = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["c"] {alignment = 4 : i64} + %1 = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["d"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["y", init] {alignment = 4 : i64} + %3 = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["e"] {alignment = 8 : i64} + %4 = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["f"] {alignment = 8 : i64} + %5 = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["g", init] {alignment = 8 : i64} + %6 = cir.load %0 : !cir.ptr<!cir.float>, !cir.float + %7 = cir.load %1 : !cir.ptr<!cir.float>, !cir.float + %8 = cir.binop(mul, %6, %7) : !cir.float + cir.store %8, %2 
: !cir.float, !cir.ptr<!cir.float> + %9 = cir.load %2 : !cir.ptr<!cir.float>, !cir.float + %10 = cir.load %1 : !cir.ptr<!cir.float>, !cir.float + %11 = cir.binop(div, %9, %10) : !cir.float + cir.store %11, %2 : !cir.float, !cir.ptr<!cir.float> + %12 = cir.load %2 : !cir.ptr<!cir.float>, !cir.float + %13 = cir.load %1 : !cir.ptr<!cir.float>, !cir.float + %14 = cir.binop(add, %12, %13) : !cir.float + cir.store %14, %2 : !cir.float, !cir.ptr<!cir.float> + %15 = cir.load %2 : !cir.ptr<!cir.float>, !cir.float + %16 = cir.load %1 : !cir.ptr<!cir.float>, !cir.float + %17 = cir.binop(sub, %15, %16) : !cir.float + cir.store %17, %2 : !cir.float, !cir.ptr<!cir.float> + %18 = cir.load %3 : !cir.ptr<!cir.double>, !cir.double + %19 = cir.load %4 : !cir.ptr<!cir.double>, !cir.double + %20 = cir.binop(add, %18, %19) : !cir.double + cir.store %20, %5 : !cir.double, !cir.ptr<!cir.double> + %21 = cir.load %3 : !cir.ptr<!cir.double>, !cir.double + %22 = cir.load %4 : !cir.ptr<!cir.double>, !cir.double + %23 = cir.binop(sub, %21, %22) : !cir.double + cir.store %23, %5 : !cir.double, !cir.ptr<!cir.double> + %24 = cir.load %3 : !cir.ptr<!cir.double>, !cir.double + %25 = cir.load %4 : !cir.ptr<!cir.double>, !cir.double + %26 = cir.binop(mul, %24, %25) : !cir.double + cir.store %26, %5 : !cir.double, !cir.ptr<!cir.double> + %27 = cir.load %3 : !cir.ptr<!cir.double>, !cir.double + %28 = cir.load %4 : !cir.ptr<!cir.double>, !cir.double + %29 = cir.binop(div, %27, %28) : !cir.double + cir.store %29, %5 : !cir.double, !cir.ptr<!cir.double> + cir.return + } +} + +// MLIR: = llvm.alloca {{.*}} f32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR: = llvm.alloca {{.*}} f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR: = llvm.fmul {{.*}} : f32 +// MLIR: = llvm.fdiv +// MLIR: = llvm.fadd +// MLIR: = llvm.fsub +// MLIR: = llvm.fadd {{.*}} : f64 +// MLIR: = llvm.fsub +// MLIR: = llvm.fmul +// MLIR: = llvm.fdiv + +// LLVM: = alloca float, i64 +// LLVM: = alloca double, i64 +// LLVM: = fmul 
float +// LLVM: = fdiv float +// LLVM: = fadd float +// LLVM: = fsub float +// LLVM: = fadd double +// LLVM: = fsub double +// LLVM: = fmul double +// LLVM: = fdiv double diff --git a/clang/test/CIR/Lowering/binop-overflow.cir b/clang/test/CIR/Lowering/binop-overflow.cir new file mode 100644 index 0000000000000..68af70aa6abb6 --- /dev/null +++ b/clang/test/CIR/Lowering/binop-overflow.cir @@ -0,0 +1,63 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering -o - | FileCheck %s -check-prefix=LLVM + +!u32i = !cir.int<u, 32> +!s32i = !cir.int<s, 32> + +module { + cir.func @test_add_u32_u32_u32(%lhs: !u32i, %rhs: !u32i, %res: !cir.ptr<!u32i>) -> !cir.bool { + %result, %overflow = cir.binop.overflow(add, %lhs, %rhs) : !u32i, (!u32i, !cir.bool) + cir.store %result, %res : !u32i, !cir.ptr<!u32i> + cir.return %overflow : !cir.bool + } + + // MLIR: llvm.func @test_add_u32_u32_u32(%[[LHS:.+]]: i32, %[[RHS:.+]]: i32, %[[RES_PTR:.+]]: !llvm.ptr) -> i1 + // MLIR-NEXT: %[[#INTRIN_RET:]] = llvm.call_intrinsic "llvm.uadd.with.overflow.i32"(%[[LHS]], %[[RHS]]) : (i32, i32) -> !llvm.struct<(i32, i1)> + // MLIR-NEXT: %[[#RES:]] = llvm.extractvalue %[[#INTRIN_RET]][0] : !llvm.struct<(i32, i1)> + // MLIR-NEXT: %[[#OVFL:]] = llvm.extractvalue %[[#INTRIN_RET]][1] : !llvm.struct<(i32, i1)> + // MLIR-NEXT: llvm.store %[[#RES]], %[[RES_PTR]] {{.*}} : i32, !llvm.ptr + // MLIR-NEXT: llvm.return %[[#OVFL]] : i1 + // MLIR-NEXT: } + + // LLVM: define i1 @test_add_u32_u32_u32(i32 %[[#LHS:]], i32 %[[#RHS:]], ptr %[[#RES_PTR:]]) + // LLVM-NEXT: %[[#INTRIN_RET:]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %[[#LHS]], i32 %[[#RHS]]) + // LLVM-NEXT: %[[#RES:]] = extractvalue { i32, i1 } %[[#INTRIN_RET]], 0 + // LLVM-NEXT: %[[#OVFL:]] = extractvalue { i32, i1 } %[[#INTRIN_RET]], 1 + // LLVM-NEXT: store i32 %[[#RES]], ptr %[[#RES_PTR]], align 4 + // LLVM-NEXT: ret i1 %[[#OVFL]] + // 
LLVM-NEXT: } + + cir.func @test_add_u32_u32_i32(%lhs: !u32i, %rhs: !u32i, %res: !cir.ptr<!s32i>) -> !cir.bool { + %result, %overflow = cir.binop.overflow(add, %lhs, %rhs) : !u32i, (!s32i, !cir.bool) + cir.store %result, %res : !s32i, !cir.ptr<!s32i> + cir.return %overflow : !cir.bool + } + + // MLIR: llvm.func @test_add_u32_u32_i32(%[[LHS:.+]]: i32, %[[RHS:.+]]: i32, %[[RES_PTR:.+]]: !llvm.ptr) -> i1 + // MLIR-NEXT: %[[#LHS_EXT:]] = llvm.zext %[[LHS]] : i32 to i33 + // MLIR-NEXT: %[[#RHS_EXT:]] = llvm.zext %[[RHS]] : i32 to i33 + // MLIR-NEXT: %[[#INTRIN_RET:]] = llvm.call_intrinsic "llvm.sadd.with.overflow.i33"(%[[#LHS_EXT]], %[[#RHS_EXT]]) : (i33, i33) -> !llvm.struct<(i33, i1)> + // MLIR-NEXT: %[[#RES_EXT:]] = llvm.extractvalue %[[#INTRIN_RET]][0] : !llvm.struct<(i33, i1)> + // MLIR-NEXT: %[[#ARITH_OVFL:]] = llvm.extractvalue %[[#INTRIN_RET]][1] : !llvm.struct<(i33, i1)> + // MLIR-NEXT: %[[#RES:]] = llvm.trunc %[[#RES_EXT]] : i33 to i32 + // MLIR-NEXT: %[[#RES_EXT_2:]] = llvm.sext %[[#RES]] : i32 to i33 + // MLIR-NEXT: %[[#TRUNC_OVFL:]] = llvm.icmp "ne" %[[#RES_EXT_2]], %[[#RES_EXT]] : i33 + // MLIR-NEXT: %[[#OVFL:]] = llvm.or %[[#ARITH_OVFL]], %[[#TRUNC_OVFL]] : i1 + // MLIR-NEXT: llvm.store %[[#RES]], %[[RES_PTR]] {{.*}} : i32, !llvm.ptr + // MLIR-NEXT: llvm.return %[[#OVFL]] : i1 + // MLIR-NEXT: } + + // LLVM: define i1 @test_add_u32_u32_i32(i32 %[[#LHS:]], i32 %[[#RHS:]], ptr %[[#RES_PTR:]]) + // LLVM-NEXT: %[[#LHS_EXT:]] = zext i32 %[[#LHS]] to i33 + // LLVM-NEXT: %[[#RHS_EXT:]] = zext i32 %[[#RHS]] to i33 + // LLVM-NEXT: %[[#INTRIN_RET:]] = call { i33, i1 } @llvm.sadd.with.overflow.i33(i33 %[[#LHS_EXT]], i33 %[[#RHS_EXT]]) + // LLVM-NEXT: %[[#RES_EXT:]] = extractvalue { i33, i1 } %[[#INTRIN_RET]], 0 + // LLVM-NEXT: %[[#ARITH_OVFL:]] = extractvalue { i33, i1 } %[[#INTRIN_RET]], 1 + // LLVM-NEXT: %[[#RES:]] = trunc i33 %[[#RES_EXT]] to i32 + // LLVM-NEXT: %[[#RES_EXT_2:]] = sext i32 %[[#RES]] to i33 + // LLVM-NEXT: %[[#TRUNC_OVFL:]] = icmp ne i33 
%[[#RES_EXT_2]], %[[#RES_EXT]] + // LLVM-NEXT: %[[#OVFL:]] = or i1 %[[#ARITH_OVFL]], %[[#TRUNC_OVFL]] + // LLVM-NEXT: store i32 %[[#RES]], ptr %[[#RES_PTR]], align 4 + // LLVM-NEXT: ret i1 %[[#OVFL]] + // LLVM-NEXT: } +} diff --git a/clang/test/CIR/Lowering/binop-signed-int.cir b/clang/test/CIR/Lowering/binop-signed-int.cir new file mode 100644 index 0000000000000..17597f080cd44 --- /dev/null +++ b/clang/test/CIR/Lowering/binop-signed-int.cir @@ -0,0 +1,60 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int<s, 32> +module { + cir.func @foo() { + %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init] {alignment = 4 : i64} + %3 = cir.const #cir.int<2> : !s32i cir.store %3, %0 : !s32i, !cir.ptr<!s32i> + %4 = cir.const #cir.int<1> : !s32i cir.store %4, %1 : !s32i, !cir.ptr<!s32i> + %5 = cir.load %0 : !cir.ptr<!s32i>, !s32i + %6 = cir.load %1 : !cir.ptr<!s32i>, !s32i + %7 = cir.binop(mul, %5, %6) : !s32i + // CHECK: = llvm.mul + cir.store %7, %2 : !s32i, !cir.ptr<!s32i> + %8 = cir.load %2 : !cir.ptr<!s32i>, !s32i + %9 = cir.load %1 : !cir.ptr<!s32i>, !s32i + %10 = cir.binop(div, %8, %9) : !s32i + // CHECK: = llvm.sdiv + cir.store %10, %2 : !s32i, !cir.ptr<!s32i> + %11 = cir.load %2 : !cir.ptr<!s32i>, !s32i + %12 = cir.load %1 : !cir.ptr<!s32i>, !s32i + %13 = cir.binop(rem, %11, %12) : !s32i + // CHECK: = llvm.srem + cir.store %13, %2 : !s32i, !cir.ptr<!s32i> + %14 = cir.load %2 : !cir.ptr<!s32i>, !s32i + %15 = cir.load %1 : !cir.ptr<!s32i>, !s32i + %16 = cir.binop(add, %14, %15) : !s32i + // CHECK: = llvm.add + cir.store %16, %2 : !s32i, !cir.ptr<!s32i> + %17 = cir.load %2 : !cir.ptr<!s32i>, !s32i + %18 = cir.load %1 : !cir.ptr<!s32i>, !s32i + %19 = cir.binop(sub, %17, %18) : !s32i + // CHECK: = llvm.sub + cir.store %19, %2 : !s32i, !cir.ptr<!s32i> + %26 = cir.load 
%2 : !cir.ptr<!s32i>, !s32i + %27 = cir.load %1 : !cir.ptr<!s32i>, !s32i + %28 = cir.binop(and, %26, %27) : !s32i + // CHECK: = llvm.and + cir.store %28, %2 : !s32i, !cir.ptr<!s32i> + %29 = cir.load %2 : !cir.ptr<!s32i>, !s32i + %30 = cir.load %1 : !cir.ptr<!s32i>, !s32i + %31 = cir.binop(xor, %29, %30) : !s32i + // CHECK: = llvm.xor + cir.store %31, %2 : !s32i, !cir.ptr<!s32i> + %32 = cir.load %2 : !cir.ptr<!s32i>, !s32i + %33 = cir.load %1 : !cir.ptr<!s32i>, !s32i + %34 = cir.binop(or, %32, %33) : !s32i + // CHECK: = llvm.or + %35 = cir.binop(add, %32, %33) sat: !s32i + // CHECK: = llvm.intr.sadd.sat{{.*}}(i32, i32) -> i32 + %36 = cir.binop(sub, %32, %33) sat: !s32i + // CHECK: = llvm.intr.ssub.sat{{.*}}(i32, i32) -> i32 + cir.store %34, %2 : !s32i, !cir.ptr<!s32i> + %37 = cir.binop(max, %32, %33) : !s32i + // CHECK: = llvm.intr.smax + cir.return + } +} diff --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir new file mode 100644 index 0000000000000..46c62b339f2ed --- /dev/null +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -0,0 +1,73 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering | FileCheck %s -check-prefix=LLVM +!u32i = !cir.int<u, 32> + +module { + cir.func @foo() { + %0 = cir.alloca !u32i, !cir.ptr<!u32i>, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !u32i, !cir.ptr<!u32i>, ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !u32i, !cir.ptr<!u32i>, ["x", init] {alignment = 4 : i64} + %3 = cir.const #cir.int<2> : !u32i cir.store %3, %0 : !u32i, !cir.ptr<!u32i> + %4 = cir.const #cir.int<1> : !u32i cir.store %4, %1 : !u32i, !cir.ptr<!u32i> + %5 = cir.load %0 : !cir.ptr<!u32i>, !u32i + %6 = cir.load %1 : !cir.ptr<!u32i>, !u32i + %7 = cir.binop(mul, %5, %6) : !u32i + cir.store %7, %2 : !u32i, !cir.ptr<!u32i> + %8 = cir.load %2 : !cir.ptr<!u32i>, !u32i + %9 = cir.load %1 : 
!cir.ptr<!u32i>, !u32i + %10 = cir.binop(div, %8, %9) : !u32i + cir.store %10, %2 : !u32i, !cir.ptr<!u32i> + %11 = cir.load %2 : !cir.ptr<!u32i>, !u32i + %12 = cir.load %1 : !cir.ptr<!u32i>, !u32i + %13 = cir.binop(rem, %11, %12) : !u32i + cir.store %13, %2 : !u32i, !cir.ptr<!u32i> + %14 = cir.load %2 : !cir.ptr<!u32i>, !u32i + %15 = cir.load %1 : !cir.ptr<!u32i>, !u32i + %16 = cir.binop(add, %14, %15) : !u32i + cir.store %16, %2 : !u32i, !cir.ptr<!u32i> + %17 = cir.load %2 : !cir.ptr<!u32i>, !u32i + %18 = cir.load %1 : !cir.ptr<!u32i>, !u32i + %19 = cir.binop(sub, %17, %18) : !u32i + cir.store %19, %2 : !u32i, !cir.ptr<!u32i> + %26 = cir.load %2 : !cir.ptr<!u32i>, !u32i + %27 = cir.load %1 : !cir.ptr<!u32i>, !u32i + %28 = cir.binop(and, %26, %27) : !u32i + cir.store %28, %2 : !u32i, !cir.ptr<!u32i> + %29 = cir.load %2 : !cir.ptr<!u32i>, !u32i + %30 = cir.load %1 : !cir.ptr<!u32i>, !u32i + %31 = cir.binop(xor, %29, %30) : !u32i + cir.store %31, %2 : !u32i, !cir.ptr<!u32i> + %32 = cir.load %2 : !cir.ptr<!u32i>, !u32i + %33 = cir.load %1 : !cir.ptr<!u32i>, !u32i + %34 = cir.binop(or, %32, %33) : !u32i + cir.store %34, %2 : !u32i, !cir.ptr<!u32i> + %35 = cir.binop(add, %32, %33) sat: !u32i + %36 = cir.binop(sub, %32, %33) sat: !u32i + %37 = cir.binop(max, %32, %33) : !u32i + cir.return + } +} + +// MLIR: = llvm.mul +// MLIR: = llvm.udiv +// MLIR: = llvm.urem +// MLIR: = llvm.add +// MLIR: = llvm.sub +// MLIR: = llvm.and +// MLIR: = llvm.xor +// MLIR: = llvm.or +// MLIR: = llvm.intr.uadd.sat{{.*}}(i32, i32) -> i32 +// MLIR: = llvm.intr.usub.sat{{.*}}(i32, i32) -> i32 +// MLIR: = llvm.intr.umax + +// LLVM: = mul i32 +// LLVM: = udiv i32 +// LLVM: = urem i32 +// LLVM: = add i32 +// LLVM: = sub i32 +// LLVM: = and i32 +// LLVM: = xor i32 +// LLVM: = or i32 +// LLVM: = call i32 @llvm.uadd.sat.i32 +// LLVM: = call i32 @llvm.usub.sat.i32 +// LLVM: = call i32 @llvm.umax.i32 >From b5141cb280408c7d51e4ece1abb9ff356416ca7b Mon Sep 17 00:00:00 2001 From: Morris Hafner 
<m...@users.noreply.github.com> Date: Mon, 24 Mar 2025 12:27:41 +0000 Subject: [PATCH 2/3] Update clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h Co-authored-by: Andy Kaylor <akay...@nvidia.com> --- clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 9fe80cde261a9..df9b84434b015 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -147,7 +147,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createCast(loc, cir::CastKind::bitcast, src, newTy); } - mlir::Value createBinop(mlir::Value lhs, cir::BinOpKind kind, + mlir::Value createBinOp(mlir::Value lhs, cir::BinOpKind kind, const llvm::APInt &rhs) { return create<cir::BinOp>(lhs.getLoc(), lhs.getType(), kind, lhs, getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); >From e440905426aff83aa0ed1482f8f84f6e2b752b5f Mon Sep 17 00:00:00 2001 From: Morris Hafner <m...@users.noreply.github.com> Date: Mon, 24 Mar 2025 12:28:19 +0000 Subject: [PATCH 3/3] Update clang/include/clang/CIR/MissingFeatures.h Co-authored-by: Andy Kaylor <akay...@nvidia.com> --- clang/include/clang/CIR/MissingFeatures.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 3654038a51fbd..7ddc7480630aa 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -136,7 +136,7 @@ struct MissingFeatures { static bool ternaryOp() { return false; } static bool tryOp() { return false; } static bool zextOp() { return false; } - static bool opPtrStride() { return false; } + static bool ptrStrideOp() { return false; } static bool opPtrDiff() { return false; } }; _______________________________________________ cfe-commits 
mailing list cfe-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits