kpn created this revision.
kpn added reviewers: pengfei, chapuni, akshaykhadse, craig.topper, arsenm.
Herald added a subscriber: jdoerfert.
Herald added a project: All.
kpn requested review of this revision.
Herald added subscribers: llvm-commits, cfe-commits, wdng.
Herald added projects: clang, LLVM.

In D146869 <https://reviews.llvm.org/D146869> @arsenm pointed out that the 
constrained intrinsics aren't getting the strictfp attribute by default. They 
should be, since they are required to have it anyway.

TableGen did not know about this attribute until now. This patch adds strictfp 
support to TableGen and applies the attribute to all of the constrained intrinsics.


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D154991

Files:
  clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
  llvm/include/llvm/IR/Intrinsics.td
  llvm/test/Feature/fp-intrinsics-attr.ll
  llvm/test/Verifier/fp-intrinsics-pass.ll
  llvm/utils/TableGen/CodeGenIntrinsics.cpp
  llvm/utils/TableGen/CodeGenIntrinsics.h
  llvm/utils/TableGen/IntrinsicEmitter.cpp

Index: llvm/utils/TableGen/IntrinsicEmitter.cpp
===================================================================
--- llvm/utils/TableGen/IntrinsicEmitter.cpp
+++ llvm/utils/TableGen/IntrinsicEmitter.cpp
@@ -388,6 +388,9 @@
   if (L->hasSideEffects != R->hasSideEffects)
     return R->hasSideEffects;
 
+  if (L->isStrictFP != R->isStrictFP)
+    return R->isStrictFP;
+
   // Try to order by readonly/readnone attribute.
   uint32_t LK = L->ME.toIntValue();
   uint32_t RK = R->ME.toIntValue();
@@ -522,6 +525,8 @@
       OS << "      Attribute::get(C, Attribute::Convergent),\n";
     if (Intrinsic.isSpeculatable)
       OS << "      Attribute::get(C, Attribute::Speculatable),\n";
+    if (Intrinsic.isStrictFP)
+      OS << "      Attribute::get(C, Attribute::StrictFP),\n";
 
     MemoryEffects ME = Intrinsic.ME;
     // TODO: IntrHasSideEffects should affect not only readnone intrinsics.
@@ -594,7 +599,8 @@
         Intrinsic.isNoReturn || Intrinsic.isNoCallback || Intrinsic.isNoSync ||
         Intrinsic.isNoFree || Intrinsic.isWillReturn || Intrinsic.isCold ||
         Intrinsic.isNoDuplicate || Intrinsic.isNoMerge ||
-        Intrinsic.isConvergent || Intrinsic.isSpeculatable) {
+        Intrinsic.isConvergent || Intrinsic.isSpeculatable ||
+        Intrinsic.isStrictFP) {
       unsigned ID = UniqFnAttributes.find(&Intrinsic)->second;
       OS << "      AS[" << numAttrs++ << "] = {AttributeList::FunctionIndex, "
          << "getIntrinsicFnAttributeSet(C, " << ID << ")};\n";
Index: llvm/utils/TableGen/CodeGenIntrinsics.h
===================================================================
--- llvm/utils/TableGen/CodeGenIntrinsics.h
+++ llvm/utils/TableGen/CodeGenIntrinsics.h
@@ -103,6 +103,9 @@
   // True if the intrinsic is marked as speculatable.
   bool isSpeculatable;
 
+  // True if the intrinsic is marked as strictfp.
+  bool isStrictFP;
+
   enum ArgAttrKind {
     NoCapture,
     NoAlias,
Index: llvm/utils/TableGen/CodeGenIntrinsics.cpp
===================================================================
--- llvm/utils/TableGen/CodeGenIntrinsics.cpp
+++ llvm/utils/TableGen/CodeGenIntrinsics.cpp
@@ -74,6 +74,7 @@
   isConvergent = false;
   isSpeculatable = false;
   hasSideEffects = false;
+  isStrictFP = false;
 
   if (DefName.size() <= 4 || DefName.substr(0, 4) != "int_")
     PrintFatalError(DefLoc,
@@ -203,6 +204,8 @@
     isSpeculatable = true;
   else if (R->getName() == "IntrHasSideEffects")
     hasSideEffects = true;
+  else if (R->getName() == "IntrStrictFP")
+    isStrictFP = true;
   else if (R->isSubClassOf("NoCapture")) {
     unsigned ArgNo = R->getValueAsInt("ArgNo");
     addArgAttribute(ArgNo, NoCapture);
Index: llvm/test/Verifier/fp-intrinsics-pass.ll
===================================================================
--- llvm/test/Verifier/fp-intrinsics-pass.ll
+++ llvm/test/Verifier/fp-intrinsics-pass.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -passes=verify -S < %s 2>&1 | FileCheck %s
 
-declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata) #0
-declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata) #0
+declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
 
 ; Test that the verifier accepts legal code, and that the correct attributes are
 ; attached to the FP intrinsic. The attributes are checked at the bottom.
@@ -9,35 +9,34 @@
 ; CHECK: declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata) #[[ATTR]]
 ; Note: FP exceptions aren't usually caught through normal unwind mechanisms,
 ;       but we may want to revisit this for asynchronous exception handling.
-define double @f1(double %a, double %b) #0 {
+define double @f1(double %a, double %b) strictfp {
 ; CHECK-LABEL: define double @f1
-; CHECK-SAME: (double [[A:%.*]], double [[B:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-SAME: (double [[A:%.*]], double [[B:%.*]]) #[[STRICTFP:[0-9]+]] {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[FADD:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR1]]
+; CHECK-NEXT:    [[FADD:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.strict")
 ; CHECK-NEXT:    ret double [[FADD]]
 entry:
   %fadd = call double @llvm.experimental.constrained.fadd.f64(
                                                double %a, double %b,
                                                metadata !"round.dynamic",
-                                               metadata !"fpexcept.strict") #0
+                                               metadata !"fpexcept.strict")
   ret double %fadd
 }
 
-define double @f1u(double %a) #0 {
+define double @f1u(double %a) strictfp {
 ; CHECK-LABEL: define double @f1u
-; CHECK-SAME: (double [[A:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: (double [[A:%.*]]) #[[STRICTFP]] {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[FSQRT:%.*]] = call double @llvm.experimental.constrained.sqrt.f64(double [[A]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR1]]
+; CHECK-NEXT:    [[FSQRT:%.*]] = call double @llvm.experimental.constrained.sqrt.f64(double [[A]], metadata !"round.dynamic", metadata !"fpexcept.strict")
 ; CHECK-NEXT:    ret double [[FSQRT]]
 ;
 entry:
   %fsqrt = call double @llvm.experimental.constrained.sqrt.f64(
                                                double %a,
                                                metadata !"round.dynamic",
-                                               metadata !"fpexcept.strict") #0
+                                               metadata !"fpexcept.strict")
   ret double %fsqrt
 }
 
-attributes #0 = { strictfp }
-; TODO: Why is strictfp not in the below list?
-; CHECK: attributes #[[ATTR]] = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
+; CHECK: attributes #[[ATTR]] = { nocallback nofree nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite) }
+; CHECK: attributes #[[STRICTFP]] = { strictfp }
Index: llvm/test/Feature/fp-intrinsics-attr.ll
===================================================================
--- /dev/null
+++ llvm/test/Feature/fp-intrinsics-attr.ll
@@ -0,0 +1,318 @@
+; RUN: opt -passes=verify -S < %s | FileCheck %s
+
+; Test to verify that constrained intrinsics all have the strictfp attribute.
+; Ordering is from Intrinsics.td.
+
+define void @func(double %a, double %b, double %c, i32 %i) strictfp {
+; CHECK-LABEL: define void @func
+; CHECK-SAME: (double [[A:%.*]], double [[B:%.*]], double [[C:%.*]], i32 [[I:%.*]]) #[[ATTR0:[0-9]+]] {
+
+  %add = call double @llvm.experimental.constrained.fadd.f64(
+                                               double %a, double %b,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %sub = call double @llvm.experimental.constrained.fsub.f64(
+                                               double %a, double %b,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %mul = call double @llvm.experimental.constrained.fmul.f64(
+                                               double %a, double %b,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %div = call double @llvm.experimental.constrained.fdiv.f64(
+                                               double %a, double %b,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %rem = call double @llvm.experimental.constrained.frem.f64(
+                                               double %a, double %b,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %fma = call double @llvm.experimental.constrained.fma.f64(
+                                               double %a, double %b, double %c,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %fmuladd = call double @llvm.experimental.constrained.fmuladd.f64(
+                                               double %a, double %b, double %c,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %si = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a,
+                                               metadata !"fpexcept.strict")
+
+  %ui = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a,
+                                               metadata !"fpexcept.strict")
+
+  %sfp = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %i,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %ufp = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %i,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %fptrunc = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %ext = call double @llvm.experimental.constrained.fpext.f64.f32(
+                                               float %fptrunc,
+                                               metadata !"fpexcept.strict")
+
+  %sqrt = call double @llvm.experimental.constrained.sqrt.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %powi = call double @llvm.experimental.constrained.powi.f64(
+                                               double %a, i32 %i,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %sin = call double @llvm.experimental.constrained.sin.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %cos = call double @llvm.experimental.constrained.cos.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %pow = call double @llvm.experimental.constrained.pow.f64(
+                                               double %a, double %b,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %log = call double @llvm.experimental.constrained.log.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %log10 = call double @llvm.experimental.constrained.log10.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %log2 = call double @llvm.experimental.constrained.log2.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %exp = call double @llvm.experimental.constrained.exp.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %exp2 = call double @llvm.experimental.constrained.exp2.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %rint = call double @llvm.experimental.constrained.rint.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %neari = call double @llvm.experimental.constrained.nearbyint.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %x32 = call i32 @llvm.experimental.constrained.lrint.i32.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %x64 = call i64 @llvm.experimental.constrained.llrint.i64.f64(
+                                               double %a,
+                                               metadata !"round.dynamic",
+                                               metadata !"fpexcept.strict")
+
+  %maxnum = call double @llvm.experimental.constrained.maxnum.f64(
+                                               double %a, double %b,
+                                               metadata !"fpexcept.strict")
+
+  %minnum = call double @llvm.experimental.constrained.minnum.f64(
+                                               double %a, double %b,
+                                               metadata !"fpexcept.strict")
+
+  %maxmum = call double @llvm.experimental.constrained.maximum.f64(
+                                               double %a, double %b,
+                                               metadata !"fpexcept.strict")
+
+  %minmum = call double @llvm.experimental.constrained.minimum.f64(
+                                               double %a, double %b,
+                                               metadata !"fpexcept.strict")
+
+  %ceil = call double @llvm.experimental.constrained.ceil.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %floor = call double @llvm.experimental.constrained.floor.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %y32 = call i32 @llvm.experimental.constrained.lround.i32.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %y64 = call i64 @llvm.experimental.constrained.llround.i64.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %round = call double @llvm.experimental.constrained.round.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %roundev = call double @llvm.experimental.constrained.roundeven.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %trunc = call double @llvm.experimental.constrained.trunc.f64(
+                                               double %a,
+                                               metadata !"fpexcept.strict")
+
+  %q1 = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %a, double %b,
+                                               metadata !"oeq",
+                                               metadata !"fpexcept.strict")
+
+  %s1 = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %a, double %b,
+                                               metadata !"oeq",
+                                               metadata !"fpexcept.strict")
+
+; CHECK: ret void
+  ret void
+}
+
+declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fadd.f64({{.*}}) #[[ATTR1:[0-9]+]]
+
+declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fsub.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fmul.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fdiv.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.frem.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fma.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.fmuladd.f64(double, double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fmuladd.f64({{.*}}) #[[ATTR1]]
+
+declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.fptosi.i32.f64({{.*}}) #[[ATTR1]]
+
+declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.fptoui.i32.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.sitofp.f64.i32({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.uitofp.f64.i32({{.*}}) #[[ATTR1]]
+
+declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fptrunc.f32.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
+; CHECK: @llvm.experimental.constrained.fpext.f64.f32({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.sqrt.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.powi.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.sin.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.cos.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.pow.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.log.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.log10.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.log2.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.exp.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.exp2.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.rint.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.nearbyint.f64({{.*}}) #[[ATTR1]]
+
+declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.lrint.i32.f64({{.*}}) #[[ATTR1]]
+
+declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.llrint.i64.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata)
+; CHECK: @llvm.experimental.constrained.maxnum.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata)
+; CHECK: @llvm.experimental.constrained.minnum.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.maximum.f64(double, double, metadata)
+; CHECK: @llvm.experimental.constrained.maximum.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.minimum.f64(double, double, metadata)
+; CHECK: @llvm.experimental.constrained.minimum.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.ceil.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.floor.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.floor.f64({{.*}}) #[[ATTR1]]
+
+declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.lround.i32.f64({{.*}}) #[[ATTR1]]
+
+declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.llround.i64.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.round.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.round.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.roundeven.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.roundeven.f64({{.*}}) #[[ATTR1]]
+
+declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
+; CHECK: @llvm.experimental.constrained.trunc.f64({{.*}}) #[[ATTR1]]
+
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fcmp.f64({{.*}}) #[[ATTR1]]
+
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.fcmps.f64({{.*}}) #[[ATTR1]]
+
+; CHECK: attributes #[[ATTR0]] = {{{.*}} strictfp {{.*}}}
+; CHECK: attributes #[[ATTR1]] = { {{.*}} strictfp {{.*}} }
+
Index: llvm/include/llvm/IR/Intrinsics.td
===================================================================
--- llvm/include/llvm/IR/Intrinsics.td
+++ llvm/include/llvm/IR/Intrinsics.td
@@ -1099,7 +1099,11 @@
 //===--------------- Constrained Floating Point Intrinsics ----------------===//
 //
 
-let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
+/// IntrStrictFP - The intrinsic is allowed to be used in an alternate
+/// floating point environment.
+def IntrStrictFP : IntrinsicProperty;
+
+let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn, IntrStrictFP] in {
   def int_experimental_constrained_fadd : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                     [ LLVMMatchType<0>,
                                                       LLVMMatchType<0>,
Index: clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
===================================================================
--- clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
+++ clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
@@ -171,7 +171,7 @@
 // STRICTFP: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind optnone strictfp "stack-protector-buffer-size"="8" "uniform-work-group-size"="false" }
 // STRICTFP: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
 // STRICTFP: attributes #[[ATTR2]] = { convergent noinline nounwind optnone strictfp "stack-protector-buffer-size"="8" }
-// STRICTFP: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
+// STRICTFP: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite) }
 // STRICTFP: attributes #[[ATTR4]] = { convergent nounwind "stack-protector-buffer-size"="8" }
 // STRICTFP: attributes #[[ATTR5]] = { strictfp }
 //.
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to