gandhi21299 updated this revision to Diff 362522.
gandhi21299 added a comment.
Herald added subscribers: llvm-commits, foad, hiraditya.
Herald added a project: LLVM.

- removed the `kernel` qualifier from functions that take a `__generic`
  qualified `addr`, since OpenCL kernel arguments cannot point to the
  generic address space (see the sketch below)
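
  A minimal sketch of the signature change (mirroring `test_flat_min_constant`
  in the updated tests below):

    // before (ill-formed): kernel arguments may not be __generic pointers
    // kernel void test_flat_min_constant(__generic double *addr, double x);

    // after: a plain device function may take a __generic pointer
    void test_flat_min_constant(__generic double *addr, double x) {
      __builtin_amdgcn_flat_atomic_fmin_f64(addr, x, memory_order_relaxed,
                                            "workgroup");
    }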


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D106909/new/

https://reviews.llvm.org/D106909

Files:
  clang/test/CodeGenCUDA/fp-atomics-optremarks.cu
  clang/test/CodeGenOpenCL/builtins-fp-atomics.cl
  clang/test/CodeGenOpenCL/fp-atomics-optremarks-gfx90a.cl
  llvm/lib/Target/AMDGPU/SIISelLowering.cpp

Index: llvm/lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -19,6 +19,7 @@
 #include "SIRegisterInfo.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
+#include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/BinaryFormat/ELF.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
@@ -12117,6 +12118,32 @@
   return DenormMode == DenormalMode::getIEEE();
 }
 
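+// Report, via an optimization remark, how this FP atomicrmw will be lowered:
+// AtomicExpansionKind::CmpXChg means it is expanded into a CAS loop, while
+// AtomicExpansionKind::None with the unsafe-fp-atomics flag set means an
+// unsafe hardware instruction is emitted. The expansion kind is returned
+// unchanged so call sites can simply return through this helper.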
+static TargetLowering::AtomicExpansionKind
+atomicExpandReturn(AtomicRMWInst *RMW,
+                   TargetLowering::AtomicExpansionKind Kind, bool UnsafeFlag) {
+  // Construct the emitter locally; no heap allocation is needed here.
+  OptimizationRemarkEmitter ORE(RMW->getFunction());
+  if (Kind == TargetLowering::AtomicExpansionKind::CmpXChg) {
+    ORE.emit([&]() {
+      OptimizationRemark Remark(DEBUG_TYPE, "Passed", RMW->getFunction());
+      Remark << "An FP atomic instruction was expanded into a CAS loop.";
+      return Remark;
+    });
+  } else if (Kind == TargetLowering::AtomicExpansionKind::None && UnsafeFlag) {
+    ORE.emit([&]() {
+      OptimizationRemark Remark(DEBUG_TYPE, "Passed", RMW->getFunction());
+      Remark << "An unsafe hardware instruction was generated.";
+      return Remark;
+    });
+  }
+  return Kind;
+}
+
 TargetLowering::AtomicExpansionKind
 SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
   switch (RMW->getOperation()) {
@@ -12132,35 +12159,44 @@
       return AtomicExpansionKind::CmpXChg;
 
     unsigned AS = RMW->getPointerAddressSpace();
-
+    bool UnsafeFPAtomicFlag = RMW->getFunction()
+                                  ->getFnAttribute("amdgpu-unsafe-fp-atomics")
+                                  .getValueAsBool();
     if ((AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) &&
          Subtarget->hasAtomicFaddInsts()) {
       // The amdgpu-unsafe-fp-atomics attribute enables generation of unsafe
       // floating point atomic instructions. May generate more efficient code,
       // but may not respect rounding and denormal modes, and may give incorrect
       // results for certain memory destinations.
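+      // For example, hardware FP atomics may not produce correct results for
+      // fine-grained host allocations, which is why the safe default below is
+      // a CAS-loop expansion.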
-      if (RMW->getFunction()
-              ->getFnAttribute("amdgpu-unsafe-fp-atomics")
-              .getValueAsString() != "true")
-        return AtomicExpansionKind::CmpXChg;
+      if (!UnsafeFPAtomicFlag)
+        return atomicExpandReturn(RMW, AtomicExpansionKind::CmpXChg,
+                                  UnsafeFPAtomicFlag);
 
       if (Subtarget->hasGFX90AInsts()) {
         if (Ty->isFloatTy() && AS == AMDGPUAS::FLAT_ADDRESS)
-          return AtomicExpansionKind::CmpXChg;
+          return atomicExpandReturn(RMW, AtomicExpansionKind::CmpXChg,
+                                    UnsafeFPAtomicFlag);
 
         auto SSID = RMW->getSyncScopeID();
         if (SSID == SyncScope::System ||
             SSID == RMW->getContext().getOrInsertSyncScopeID("one-as"))
-          return AtomicExpansionKind::CmpXChg;
+          return atomicExpandReturn(RMW, AtomicExpansionKind::CmpXChg,
+                                    UnsafeFPAtomicFlag);
 
-        return AtomicExpansionKind::None;
+        return atomicExpandReturn(RMW, AtomicExpansionKind::None,
+                                  UnsafeFPAtomicFlag);
       }
 
       if (AS == AMDGPUAS::FLAT_ADDRESS)
-        return AtomicExpansionKind::CmpXChg;
+        return atomicExpandReturn(RMW, AtomicExpansionKind::CmpXChg,
+                                  UnsafeFPAtomicFlag);
 
-      return RMW->use_empty() ? AtomicExpansionKind::None
-                              : AtomicExpansionKind::CmpXChg;
+      auto Kind = RMW->use_empty() ? AtomicExpansionKind::None
+                                   : AtomicExpansionKind::CmpXChg;
+      return atomicExpandReturn(RMW, Kind, UnsafeFPAtomicFlag);
     }
 
     // DS FP atomics do respect the denormal mode, but the rounding mode is fixed
@@ -12168,17 +12204,17 @@
     // The only exception is DS_ADD_F64 which never flushes regardless of mode.
     if (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) {
       if (!Ty->isDoubleTy())
-        return AtomicExpansionKind::None;
+        return atomicExpandReturn(RMW, AtomicExpansionKind::None,
+                                  UnsafeFPAtomicFlag);
 
-      return (fpModeMatchesGlobalFPAtomicMode(RMW) ||
-              RMW->getFunction()
-                      ->getFnAttribute("amdgpu-unsafe-fp-atomics")
-                      .getValueAsString() == "true")
-                 ? AtomicExpansionKind::None
-                 : AtomicExpansionKind::CmpXChg;
+      auto Kind = (fpModeMatchesGlobalFPAtomicMode(RMW) || UnsafeFPAtomicFlag)
+                      ? AtomicExpansionKind::None
+                      : AtomicExpansionKind::CmpXChg;
+      return atomicExpandReturn(RMW, Kind, UnsafeFPAtomicFlag);
     }
 
-    return AtomicExpansionKind::CmpXChg;
+    return atomicExpandReturn(RMW, AtomicExpansionKind::CmpXChg,
+                              UnsafeFPAtomicFlag);
   }
   default:
     break;
Index: clang/test/CodeGenOpenCL/fp-atomics-optremarks-gfx90a.cl
===================================================================
--- /dev/null
+++ clang/test/CodeGenOpenCL/fp-atomics-optremarks-gfx90a.cl
@@ -0,0 +1,30 @@
+// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-amd-amdhsa -target-cpu gfx90a \
+// RUN:     -Rpass=si-lower -munsafe-fp-atomics %s -S -o - 2>&1 \
+// RUN:     | FileCheck %s -check-prefix=GFX90A-HW
+
+typedef enum memory_order {
+  memory_order_relaxed = __ATOMIC_RELAXED,
+  memory_order_acquire = __ATOMIC_ACQUIRE,
+  memory_order_release = __ATOMIC_RELEASE,
+  memory_order_acq_rel = __ATOMIC_ACQ_REL,
+  memory_order_seq_cst = __ATOMIC_SEQ_CST
+} memory_order;
+
+typedef enum memory_scope {
+  memory_scope_work_item = __OPENCL_MEMORY_SCOPE_WORK_ITEM,
+  memory_scope_work_group = __OPENCL_MEMORY_SCOPE_WORK_GROUP,
+  memory_scope_device = __OPENCL_MEMORY_SCOPE_DEVICE,
+  memory_scope_all_svm_devices = __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES,
+#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
+  memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP
+#endif
+} memory_scope;
+
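+// With -munsafe-fp-atomics, the fetch-add below maps directly to the unsafe
+// global_atomic_add_f64 hardware instruction, and the remark reports it.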
+// GFX90A-HW: An unsafe hardware instruction was generated.
+// GFX90A-HW-LABEL: test_atomic_add
+// GFX90A-HW:   global_atomic_add_f64
+float test_atomic_add(global atomic_double *d, double a) {
+  return __opencl_atomic_fetch_add(d, a, memory_order_relaxed, memory_scope_work_group);
+}
Index: clang/test/CodeGenOpenCL/builtins-fp-atomics.cl
===================================================================
--- /dev/null
+++ clang/test/CodeGenOpenCL/builtins-fp-atomics.cl
@@ -0,0 +1,136 @@
+// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-amd-amdhsa -target-cpu gfx90a \
+// RUN:   %s -S -emit-llvm -o - | FileCheck %s -check-prefix=CHECK
+
+// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-amd-amdhsa -target-cpu gfx90a \
+// RUN:   -S -o - %s | FileCheck -check-prefix=GFX90A %s
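+// The first RUN line checks the LLVM IR for the expected atomic intrinsic
+// calls; the second checks the final gfx90a ISA for the corresponding
+// hardware instructions.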
+
+
+typedef enum memory_order {
+  memory_order_relaxed = __ATOMIC_RELAXED,
+  memory_order_acquire = __ATOMIC_ACQUIRE,
+  memory_order_release = __ATOMIC_RELEASE,
+  memory_order_acq_rel = __ATOMIC_ACQ_REL,
+  memory_order_seq_cst = __ATOMIC_SEQ_CST
+} memory_order;
+
+typedef half __attribute__((ext_vector_type(2))) half2;
+
+// CHECK-LABEL: test_global_add
+// CHECK: tail call double @llvm.amdgcn.global.atomic.fadd.f64.p1f64.f64(double addrspace(1)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_global_add
+// GFX90A:  global_atomic_add_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+kernel void test_global_add(__global double *addr, double x) {
+  __builtin_amdgcn_global_atomic_fadd_f64(addr, x, memory_order_relaxed, "workgroup");
+}
+
+// CHECK-LABEL: test_global_addf
+// CHECK: tail call float @llvm.amdgcn.global.atomic.fadd.f32.p1f32.f32(float addrspace(1)* %{{.*}}, float %{{.*}})
+// GFX90A-LABEL: test_global_addf
+// GFX90A: global_atomic_add_f32 v0, v1, s[0:1]
+// GFX90A: s_endpgm
+kernel void test_global_addf(__global float *addr, float x) {
+  __builtin_amdgcn_global_atomic_fadd_f32(addr, x, memory_order_relaxed, "workgroup");
+}
+
+// CHECK-LABEL: test_global_add2h
+// CHECK: tail call <2 x half> @llvm.amdgcn.global.atomic.fadd.v2f16.p1v2f16.v2f16(<2 x half> addrspace(1)* %{{.*}}, <2 x half> %{{.*}})
+// GFX90A-LABEL: test_global_add2h
+// GFX90A: global_atomic_pk_add_f16 v0, v1, s[0:1]
+// GFX90A: s_endpgm
+kernel void test_global_add2h(__global half2 *addr, half2 x){
+  __builtin_amdgcn_global_atomic_fadd_2f16(addr, x, memory_order_relaxed, "workgroup");
+}
+
+// CHECK-LABEL: test_global_global_min
+// CHECK: tail call double @llvm.amdgcn.global.atomic.fmin.f64.p1f64.f64(double addrspace(1)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL:  test_global_global_min
+// GFX90A:  global_atomic_min_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+kernel void test_global_global_min(__global double *addr, double x){
+  __builtin_amdgcn_global_atomic_fmin_f64(addr, x, memory_order_relaxed, "workgroup");
+}
+
+// CHECK-LABEL: test_global_max
+// CHECK: tail call double @llvm.amdgcn.global.atomic.fmax.f64.p1f64.f64(double addrspace(1)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_global_max
+// GFX90A:  global_atomic_max_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+kernel void test_global_max(__global double *addr, double x){
+  __builtin_amdgcn_global_atomic_fmax_f64(addr, x, memory_order_relaxed, "workgroup");
+}
+
+// CHECK-LABEL: test_flat_add_local
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fadd.f64.p3f64.f64(double addrspace(3)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_flat_add_local
+// GFX90A:  ds_add_f64 v2, v[0:1]
+// GFX90A:  s_endpgm
+kernel void test_flat_add_local(__local double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fadd_f64(addr, x, memory_order_relaxed, "workgroup");
+}
+
+// CHECK-LABEL: test_flat_global_add
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fadd.f64.p1f64.f64(double addrspace(1)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_flat_global_add
+// GFX90A:  global_atomic_add_f64
+// GFX90A:  s_endpgm
+kernel void test_flat_global_add(__global double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fadd_f64(addr, x, memory_order_relaxed, "workgroup");
+}
+
+// CHECK-LABEL: test_flat_min_constant
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fmin.f64.p4f64.f64(double addrspace(4)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL: test_flat_min_constant
+// GFX90A:  global_atomic_min_f64
+// GFX90A:  s_endpgm
+void test_flat_min_constant(__generic double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fmin_f64(addr, x, memory_order_relaxed, "workgroup");
+}
+
+// CHECK-LABEL: test_flat_global_min
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fmin.f64.p1f64.f64(double addrspace(1)* %{{.*}}, double %{{.*}})
+// GFX90A:  test_flat_global_min
+// GFX90A:  global_atomic_min_f64
+// GFX90A:  s_endpgm
+kernel void test_flat_global_min(__global double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fmin_f64(addr, x, memory_order_relaxed, "workgroup");
+}
+
+// CHECK-LABEL: test_flat_max_constant
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fmax.f64.p4f64.f64(double addrspace(4)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL:  test_flat_max_constant
+// GFX90A:  global_atomic_max_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+void test_flat_max_constant(__generic double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fmax_f64(addr, x, memory_order_relaxed, "workgroup");
+}
+
+// CHECK-LABEL: test_flat_global_max
+// CHECK: tail call double @llvm.amdgcn.flat.atomic.fmax.f64.p1f64.f64(double addrspace(1)* %{{.*}}, double %{{.*}})
+// GFX90A-LABEL:  test_flat_global_max
+// GFX90A:  global_atomic_max_f64 v2, v[0:1], s[0:1]
+// GFX90A:  s_endpgm
+kernel void test_flat_global_max(__global double *addr, double x){
+  __builtin_amdgcn_flat_atomic_fmax_f64(addr, x, memory_order_relaxed, "workgroup");
+}
+
+// CHECK-LABEL: test_ds_add_local
+// CHECK: tail call double @llvm.amdgcn.ds.fadd.f64(double addrspace(3)* %{{.*}}, double %{{.*}},
+// GFX90A-LABEL: test_ds_add_local
+// GFX90A:  ds_add_f64 v2, v[0:1]
+// GFX90A:  s_endpgm
+kernel void test_ds_add_local(__local double *addr, double x){
+  __builtin_amdgcn_ds_atomic_fadd_f64(addr, x, memory_order_relaxed, "workgroup");
+}
+
+// CHECK-LABEL: test_ds_addf_local
+// CHECK: tail call float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* %{{.*}}, float %{{.*}},
+// GFX90A-LABEL:  test_ds_addf_local
+// GFX90A:  ds_add_f32 v0, v1
+// GFX90A:  s_endpgm
+kernel void test_ds_addf_local(__local float *addr, float x){
+  __builtin_amdgcn_ds_atomic_fadd_f32(addr, x, memory_order_relaxed, "workgroup");
+}
Index: clang/test/CodeGenCUDA/fp-atomics-optremarks.cu
===================================================================
--- /dev/null
+++ clang/test/CodeGenCUDA/fp-atomics-optremarks.cu
@@ -0,0 +1,18 @@
+// RUN: %clang_cc1 %s -triple=amdgcn-amd-amdhsa -fcuda-is-device \
+// RUN:   -target-cpu gfx90a -Rpass=.* -S -o - 2>&1 | \
+// RUN:   FileCheck %s --check-prefix=GFX90A-CAS
+
+// REQUIRES: amdgpu-registered-target
+
+#include "Inputs/cuda.h"
+#include <stdatomic.h>
+
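+// Without -munsafe-fp-atomics, the FP atomic fetch-add on gfx90a is expanded
+// into a CAS loop, and -Rpass surfaces the corresponding remark on stderr.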
+// GFX90A-CAS: An FP atomic instruction was expanded into a CAS loop.
+// GFX90A-CAS-LABEL: _Z14atomic_add_casPf
+// GFX90A-CAS:  flat_atomic_cmpswap v0, v[2:3], v[4:5] glc
+// GFX90A-CAS:  s_cbranch_execnz
+__device__ float atomic_add_cas(float *p) {
+  return __atomic_fetch_add(p, 1.0f, memory_order_relaxed);
+}