================
@@ -0,0 +1,453 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Builtin calls as CIR or a function call to be
+// later resolved.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenCall.h"
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+#include "CIRGenValue.h"
+#include "clang/AST/Expr.h"
+#include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "clang/CIR/MissingFeatures.h"
+#include "llvm/IR/Intrinsics.h"
+
+#include "clang/AST/GlobalDecl.h"
+#include "clang/Basic/Builtins.h"
+
+#include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/IR/Value.h"
+#include "mlir/Support/LLVM.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+using namespace cir;
+using namespace llvm;
+
+static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd,
+                              const CallExpr *e, mlir::Operation *calleeValue) {
+  CIRGenCallee callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(fd));
+  return cgf.emitCall(e->getCallee()->getType(), callee, e, ReturnValueSlot());
+}
+
+static mlir::Type
+decodeFixedType(CIRGenFunction &cgf,
+                ArrayRef<llvm::Intrinsic::IITDescriptor> &infos) {
+  using namespace llvm::Intrinsic;
+
+  auto *context = &cgf.getMLIRContext();
+  IITDescriptor descriptor = infos.front();
+  infos = infos.slice(1);
+
+  switch (descriptor.Kind) {
+  case IITDescriptor::Void:
+    return VoidType::get(context);
+  case IITDescriptor::Integer:
+    return IntType::get(context, descriptor.Integer_Width, /*isSigned=*/true);
+  case IITDescriptor::Float:
+    return SingleType::get(context);
+  case IITDescriptor::Double:
+    return DoubleType::get(context);
+  default:
+    cgf.cgm.errorNYI("intrinsic return types");
+    return VoidType::get(context);
+  }
+}
+
+// llvm::Intrinsics accepts only LLVMContext. We need to reimplement it here.
+static cir::FuncType getIntrinsicType(CIRGenFunction &cgf,
+                                      llvm::Intrinsic::ID id) {
+  using namespace llvm::Intrinsic;
+
+  SmallVector<IITDescriptor, 8> table;
+  getIntrinsicInfoTableEntries(id, table);
+
+  ArrayRef<IITDescriptor> tableRef = table;
+  mlir::Type resultTy = decodeFixedType(cgf, tableRef);
+
+  SmallVector<mlir::Type, 8> argTypes;
+  while (!tableRef.empty())
+    argTypes.push_back(decodeFixedType(cgf, tableRef));
+
+  return FuncType::get(argTypes, resultTy);
+}
+
+static mlir::Value emitTargetArchBuiltinExpr(CIRGenFunction *cgf,
+                                             unsigned builtinID,
+                                             const CallExpr *e,
+                                             ReturnValueSlot returnValue,
+                                             llvm::Triple::ArchType arch) {
+  return {};
+}
+
+mlir::Value CIRGenFunction::emitTargetBuiltinExpr(unsigned builtinID,
+                                                  const CallExpr *e,
+                                                  ReturnValueSlot returnValue) {
+  if (getContext().BuiltinInfo.isAuxBuiltinID(builtinID)) {
+    assert(getContext().getAuxTargetInfo() && "Missing aux target info");
+    return emitTargetArchBuiltinExpr(
+        this, getContext().BuiltinInfo.getAuxBuiltinID(builtinID), e,
+        returnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
+  }
+
+  return emitTargetArchBuiltinExpr(this, builtinID, e, returnValue,
+                                   getTarget().getTriple().getArch());
+}
+
+mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(unsigned iceArguments,
+                                                        unsigned idx,
+                                                        const CallExpr *e) {
+  mlir::Value arg = {};
+  if ((iceArguments & (1 << idx)) == 0) {
+    arg = emitScalarExpr(e->getArg(idx));
+  } else {
+    // If this is required to be a constant, constant fold it so that we
+    // know that the generated intrinsic gets a ConstantInt.
+    std::optional<llvm::APSInt> result =
+        e->getArg(idx)->getIntegerConstantExpr(getContext());
+    assert(result && "Expected argument to be a constant");
+    arg = builder.getConstInt(getLoc(e->getSourceRange()), *result);
+  }
+  return arg;
+}
+
+RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
+                                       const CallExpr *e,
+                                       ReturnValueSlot returnValue) {
+  const FunctionDecl *fd = gd.getDecl()->getAsFunction();
+
+  // See if we can constant fold this builtin. If so, don't emit it at all.
+  // TODO: Extend this handling to all builtin calls that we can constant-fold.
+  Expr::EvalResult result;
+  if (e->isPRValue() && e->EvaluateAsRValue(result, cgm.getASTContext()) &&
+      !result.hasSideEffects()) {
+    if (result.Val.isInt()) {
+      return RValue::get(builder.getConstInt(getLoc(e->getSourceRange()),
+                                             result.Val.getInt()));
+    }
+    if (result.Val.isFloat()) {
+      // Note: we are using the result type of the CallExpr to determine the
+      // type of the constant. Clang CodeGen uses the result value to judge
+      // the type. It should be OK to use the expression type because it is
+      // hard to imagine a builtin function evaluating to a value that
+      // over/underflows its own defined type.
+      mlir::Type resTy = convertType(e->getType());
+      return RValue::get(builder.getConstFP(getLoc(e->getExprLoc()), resTy,
+                                            result.Val.getFloat()));
+    }
+  }
+
+  // If the current long-double semantics is IEEE 128-bit, replace math
+  // builtins of long-double with the f128 equivalent.
+  // TODO: This mutation should also be applied to targets other than PPC,
+  // once the backend supports IEEE 128-bit style libcalls.
+  if (getTarget().getTriple().isPPC64() &&
+      &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad()) {
+    cgm.errorNYI("long double builtin mutation");
+  }
+
+  // If the builtin has been declared explicitly with an assembler label,
+  // disable the specialized emitting below. Ideally we should communicate the
+  // rename in IR, or at least avoid generating the intrinsic calls that are
+  // likely to get lowered to the renamed library functions.
+  const unsigned builtinIDIfNoAsmLabel =
+      fd->hasAttr<AsmLabelAttr>() ? 0 : builtinID;
+
+  std::optional<bool> errnoOverriden;
+  // errnoOverriden is true if math-errno is overridden via the
+  // '#pragma float_control(precise, on)'. This pragma disables fast-math,
+  // which implies math-errno.
+  if (e->hasStoredFPFeatures()) {
+    FPOptionsOverride op = e->getFPFeatures();
+    if (op.hasMathErrnoOverride())
+      errnoOverriden = op.getMathErrnoOverride();
+  }
+  // True if '__attribute__((optnone))' is used. This attribute overrides
+  // fast-math, which implies math-errno.
+  bool optNone = curFuncDecl && curFuncDecl->hasAttr<OptimizeNoneAttr>();
+
+  // True if we are compiling at -O2 and errno has been disabled
+  // using the '#pragma float_control(precise, off)', and
+  // attribute opt-none hasn't been seen.
+  [[maybe_unused]] bool errnoOverridenToFalseWithOpt =
+      errnoOverriden.has_value() && !errnoOverriden.value() && !optNone &&
+      cgm.getCodeGenOpts().OptimizationLevel != 0;
+
+  // There are LLVM math intrinsics/instructions corresponding to math library
+  // functions, except that the LLVM op will never set errno while the math
+  // library might. Also, math builtins have the same semantics as their math
+  // library twins. Thus, we can transform math library and builtin calls to
+  // their LLVM counterparts if the call is marked 'const' (known to never set
+  // errno). In case FP exceptions are enabled, the experimental versions of
+  // the intrinsics model those.
+  [[maybe_unused]] bool constAlways =
+      getContext().BuiltinInfo.isConst(builtinID);
+
+  // There's a special case with the fma builtins where they are always const
+  // if the target environment is GNU or the target OS is Windows and we're
+  // targeting the MSVCRT.dll environment.
+  // FIXME: This list can become outdated. Need to find a way to get it some
+  // other way.
+  switch (builtinID) {
+  case Builtin::BI__builtin_fma:
+  case Builtin::BI__builtin_fmaf:
+  case Builtin::BI__builtin_fmal:
+  case Builtin::BIfma:
+  case Builtin::BIfmaf:
+  case Builtin::BIfmal:
+    cgm.errorNYI("FMA builtins");
+    break;
+  }
+
+  bool constWithoutErrnoAndExceptions =
+      getContext().BuiltinInfo.isConstWithoutErrnoAndExceptions(builtinID);
+  bool constWithoutExceptions =
+      getContext().BuiltinInfo.isConstWithoutExceptions(builtinID);
+
+  // ConstAttr is enabled in fast-math mode. In fast-math mode, math-errno is
+  // disabled.
+  // Math intrinsics are generated only when math-errno is disabled. Any
+  // pragmas or attributes that affect math-errno should prevent or allow math
+  // intrinsics to be generated. Intrinsics are generated:
+  // 1- In fast-math mode, unless math-errno is overridden
+  //    via '#pragma float_control(precise, on)', or via
+  //    '__attribute__((optnone))'.
+  // 2- If math-errno was enabled on the command line but overridden
+  //    to false via '#pragma float_control(precise, off)' and
+  //    '__attribute__((optnone))' hasn't been used.
+  // 3- If we are compiling with optimization and errno has been disabled
+  //    via '#pragma float_control(precise, off)', and
+  //    '__attribute__((optnone))' hasn't been used.
+
+  bool constWithoutErrnoOrExceptions =
+      constWithoutErrnoAndExceptions || constWithoutExceptions;
+  bool generateIntrinsics =
+      (constAlways && !optNone) ||
+      (!getLangOpts().MathErrno &&
+       !(errnoOverriden.has_value() && errnoOverriden.value()) && !optNone);
+  if (!generateIntrinsics) {
+    generateIntrinsics =
+        constWithoutErrnoOrExceptions && !constWithoutErrnoAndExceptions;
+    if (!generateIntrinsics)
+      generateIntrinsics =
+          constWithoutErrnoOrExceptions &&
+          (!getLangOpts().MathErrno &&
+           !(errnoOverriden.has_value() && errnoOverriden.value()) && !optNone);
+    if (!generateIntrinsics)
+      generateIntrinsics =
+          constWithoutErrnoOrExceptions && errnoOverridenToFalseWithOpt;
+  }
+
+  if (generateIntrinsics) {
+    assert(!cir::MissingFeatures::intrinsics());
+    return {};
+  }
+
+  switch (builtinIDIfNoAsmLabel) {
----------------
andykaylor wrote:
Is there an errorNYI missing here?

https://github.com/llvm/llvm-project/pull/142981
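For readers following along, here is a minimal sketch of the kind of fallback the question appears to point at: a default path after the switch over builtinIDIfNoAsmLabel so unhandled builtins are reported instead of silently falling through. The helpers cgm.getBuiltinLibFunction and getUndefRValue, and the exact errorNYI message, are assumptions for illustration and are not taken from the patch.

```cpp
// Sketch only (not from the patch): possible fallthrough handling after the
// switch, for builtins with no specialized CIR lowering yet.

// If this is an alias for a library function (e.g. __builtin_sin), emit it as
// an ordinary library call via emitLibraryCall, which is defined at the top of
// this file. getBuiltinLibFunction is an assumed CIRGenModule helper mirroring
// classic CodeGen.
if (getContext().BuiltinInfo.isLibFunction(builtinID))
  return emitLibraryCall(*this, fd, e,
                         cgm.getBuiltinLibFunction(fd, builtinID));

// Otherwise flag the missing lowering explicitly rather than returning an
// empty RValue.
cgm.errorNYI(e->getSourceRange(), "unimplemented builtin call");
return getUndefRValue(e->getType()); // assumed helper producing an undef RValue
```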