https://github.com/vitor1001 updated 
https://github.com/llvm/llvm-project/pull/143664

From 079672a23dd5ef832f4fa9b974b3b23b2b48a207 Mon Sep 17 00:00:00 2001
From: Vitor Sessak <vses...@google.com>
Date: Wed, 11 Jun 2025 08:21:48 +0000
Subject: [PATCH] Add missing intrinsics to CUDA headers.

Clang prevents the CUDA sm_32_intrinsics.hpp header from being included by
defining __SM_32_INTRINSICS_HPP__, and instead provides drop-in replacements
for the functions defined in that header.

One issue is that some intrinsics were added to CUDA after the replacements
were written and therefore have no counterpart, which breaks code that calls
them (Raft is one example).

This CL backports the code from sm_32_intrinsics.hpp for the missing
intrinsics.
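
For illustration, a minimal sketch (not part of the patch; kernel and variable
names are made up) of the kind of code that fails to compile with clang today
because these intrinsics have no replacement, and that builds again with this
change:

  __global__ void scale_copy(const float4 *in, float4 *out, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
      float4 v = __ldcg(&in[i]);  // cache-global load (ld.global.cg.v4.f32)
      v.x *= s; v.y *= s; v.z *= s; v.w *= s;
      __stwt(&out[i], v);         // write-through store (st.global.wt.v4.f32)
    }
  }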
---
 clang/lib/Headers/__clang_cuda_intrinsics.h | 286 +++++++++++++++++++-
 1 file changed, 285 insertions(+), 1 deletion(-)

diff --git a/clang/lib/Headers/__clang_cuda_intrinsics.h b/clang/lib/Headers/__clang_cuda_intrinsics.h
index 8b230af6f6647..e3a2fd8833728 100644
--- a/clang/lib/Headers/__clang_cuda_intrinsics.h
+++ b/clang/lib/Headers/__clang_cuda_intrinsics.h
@@ -479,7 +479,291 @@ inline __device__ unsigned __funnelshift_rc(unsigned low32, unsigned high32,
   return ret;
 }
 
-#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
+#pragma push_macro("__INTRINSIC_LOAD")
+#define __INTRINSIC_LOAD(__FnName, __AsmOp, __DeclType, __TmpType, __AsmType, \
+                         __Clobber)                                           \
+  inline __device__ __DeclType __FnName(const __DeclType *ptr) {              \
+    __TmpType ret;                                                            \
+    asm(__AsmOp " %0, [%1];" : __AsmType(ret) : "l"(ptr)__Clobber);           \
+    return (__DeclType)ret;                                                   \
+  }
+
+#pragma push_macro("__INTRINSIC_LOAD2")
+#define __INTRINSIC_LOAD2(__FnName, __AsmOp, __DeclType, __TmpType, __AsmType, \
+                          __Clobber)                                           \
+  inline __device__ __DeclType __FnName(const __DeclType *ptr) {               \
+    __DeclType ret;                                                            \
+    __TmpType tmp;                                                             \
+    asm(__AsmOp " {%0,%1}, [%2];"                                              \
+        : __AsmType(tmp.x), __AsmType(tmp.y)                                   \
+        : "l"(ptr)__Clobber);                                                  \
+    using __ElementType = decltype(ret.x);                                     \
+    ret.x = (__ElementType)(tmp.x);                                            \
+    ret.y = (__ElementType)tmp.y;                                              \
+    return ret;                                                                \
+  }
+
+#pragma push_macro("__INTRINSIC_LOAD4")
+#define __INTRINSIC_LOAD4(__FnName, __AsmOp, __DeclType, __TmpType, __AsmType, \
+                          __Clobber)                                           \
+  inline __device__ __DeclType __FnName(const __DeclType *ptr) {               \
+    __DeclType ret;                                                            \
+    __TmpType tmp;                                                             \
+    asm(__AsmOp " {%0,%1,%2,%3}, [%4];"                                        \
+        : __AsmType(tmp.x), __AsmType(tmp.y), __AsmType(tmp.z),                \
+          __AsmType(tmp.w)                                                     \
+        : "l"(ptr)__Clobber);                                                  \
+    using __ElementType = decltype(ret.x);                                     \
+    ret.x = (__ElementType)tmp.x;                                              \
+    ret.y = (__ElementType)tmp.y;                                              \
+    ret.z = (__ElementType)tmp.z;                                              \
+    ret.w = (__ElementType)tmp.w;                                              \
+    return ret;                                                                \
+  }
+
+__INTRINSIC_LOAD(__ldcg, "ld.global.cg.s8", char, unsigned int, "=r", );
+__INTRINSIC_LOAD(__ldcg, "ld.global.cg.s8", signed char, unsigned int, "=r", );
+__INTRINSIC_LOAD(__ldcg, "ld.global.cg.s16", short, unsigned short, "=h", );
+__INTRINSIC_LOAD(__ldcg, "ld.global.cg.s32", int, unsigned int, "=r", );
+__INTRINSIC_LOAD(__ldcg, "ld.global.cg.s64", long long, unsigned long long,
+                 "=l", );
+
+__INTRINSIC_LOAD2(__ldcg, "ld.global.cg.v2.s8", char2, int2, "=r", );
+__INTRINSIC_LOAD4(__ldcg, "ld.global.cg.v4.s8", char4, int4, "=r", );
+__INTRINSIC_LOAD2(__ldcg, "ld.global.cg.v2.s16", short2, short2, "=h", );
+__INTRINSIC_LOAD4(__ldcg, "ld.global.cg.v4.s16", short4, short4, "=h", );
+__INTRINSIC_LOAD2(__ldcg, "ld.global.cg.v2.s32", int2, int2, "=r", );
+__INTRINSIC_LOAD4(__ldcg, "ld.global.cg.v4.s32", int4, int4, "=r", );
+__INTRINSIC_LOAD2(__ldcg, "ld.global.cg.v2.s64 ", longlong2, longlong2, "=l", 
);
+
+__INTRINSIC_LOAD(__ldcg, "ld.global.cg.u8", unsigned char, unsigned int,
+                 "=r", );
+__INTRINSIC_LOAD(__ldcg, "ld.global.cg.u16", unsigned short, unsigned short,
+                 "=h", );
+__INTRINSIC_LOAD(__ldcg, "ld.global.cg.u32", unsigned int, unsigned int,
+                 "=r", );
+__INTRINSIC_LOAD(__ldcg, "ld.global.cg.u64", unsigned long long,
+                 unsigned long long, "=l", );
+
+__INTRINSIC_LOAD2(__ldcg, "ld.global.cg.v2.u8", uchar2, int2, "=r", );
+__INTRINSIC_LOAD4(__ldcg, "ld.global.cg.v4.u8", uchar4, int4, "=r", );
+__INTRINSIC_LOAD2(__ldcg, "ld.global.cg.v2.u16", ushort2, ushort2, "=h", );
+__INTRINSIC_LOAD4(__ldcg, "ld.global.cg.v4.u16", ushort4, ushort4, "=h", );
+__INTRINSIC_LOAD2(__ldcg, "ld.global.cg.v2.u32", uint2, uint2, "=r", );
+__INTRINSIC_LOAD4(__ldcg, "ld.global.cg.v4.u32", uint4, uint4, "=r", );
+__INTRINSIC_LOAD2(__ldcg, "ld.global.cg.v2.u64", ulonglong2, ulonglong2,
+                  "=l", );
+
+__INTRINSIC_LOAD(__ldcg, "ld.global.cg.f32", float, float, "=f", );
+__INTRINSIC_LOAD(__ldcg, "ld.global.cg.f64", double, double, "=d", );
+__INTRINSIC_LOAD2(__ldcg, "ld.global.cg.v2.f32", float2, float2, "=f", );
+__INTRINSIC_LOAD4(__ldcg, "ld.global.cg.v4.f32", float4, float4, "=f", );
+__INTRINSIC_LOAD2(__ldcg, "ld.global.cg.v2.f64", double2, double2, "=d", );
+
+inline __device__ long __ldcg(const long *ptr) {
+  unsigned long ret;
+  if (sizeof(long) == 8) {
+    asm("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : "l"(ptr));
+  } else {
+    asm("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : "l"(ptr));
+  }
+  return (long)ret;
+}
+
+__INTRINSIC_LOAD(__ldcv, "ld.global.cv.u8", unsigned char, unsigned int,
+                 "=r", : "memory");
+__INTRINSIC_LOAD(__ldcv, "ld.global.cv.u16", unsigned short, unsigned short,
+                 "=h", : "memory");
+__INTRINSIC_LOAD(__ldcv, "ld.global.cv.u32", unsigned int, unsigned int,
+                 "=r", : "memory");
+__INTRINSIC_LOAD(__ldcv, "ld.global.cv.u64", unsigned long long,
+                 unsigned long long, "=l", : "memory");
+
+__INTRINSIC_LOAD(__ldcv, "ld.global.cv.s8", char, unsigned int,
+                 "=r", : "memory");
+__INTRINSIC_LOAD(__ldcv, "ld.global.cv.s8", signed char, unsigned int,
+                 "=r", : "memory");
+__INTRINSIC_LOAD(__ldcv, "ld.global.cv.s16", short, unsigned short,
+                 "=h", : "memory");
+__INTRINSIC_LOAD(__ldcv, "ld.global.cv.s32", int, unsigned int,
+                 "=r", : "memory");
+__INTRINSIC_LOAD(__ldcv, "ld.global.cv.s64", long long, unsigned long long,
+                 "=l", : "memory");
+
+__INTRINSIC_LOAD2(__ldcv, "ld.global.cv.v2.u8", uchar2, uint2,
+                  "=r", : "memory");
+__INTRINSIC_LOAD4(__ldcv, "ld.global.cv.v4.u8", uchar4, uint4,
+                  "=r", : "memory");
+__INTRINSIC_LOAD2(__ldcv, "ld.global.cv.v2.u16", ushort2, ushort2,
+                  "=h", : "memory");
+__INTRINSIC_LOAD4(__ldcv, "ld.global.cv.v4.u16", ushort4, ushort4,
+                  "=h", : "memory");
+__INTRINSIC_LOAD2(__ldcv, "ld.global.cv.v2.u32", uint2, uint2,
+                  "=r", : "memory");
+__INTRINSIC_LOAD4(__ldcv, "ld.global.cv.v4.u32", uint4, uint4,
+                  "=r", : "memory");
+__INTRINSIC_LOAD2(__ldcv, "ld.global.cv.v2.u64", ulonglong2, ulonglong2,
+                  "=l", : "memory");
+
+__INTRINSIC_LOAD2(__ldcv, "ld.global.cv.v2.s8", char2, int2, "=r", : "memory");
+__INTRINSIC_LOAD4(__ldcv, "ld.global.cv.v4.s8", char4, int4, "=r", : "memory");
+__INTRINSIC_LOAD2(__ldcv, "ld.global.cv.v2.s16", short2, short2,
+                  "=h", : "memory");
+__INTRINSIC_LOAD4(__ldcv, "ld.global.cv.v4.s16", short4, short4,
+                  "=h", : "memory");
+__INTRINSIC_LOAD2(__ldcv, "ld.global.cv.v2.s32", int2, int2, "=r", : "memory");
+__INTRINSIC_LOAD4(__ldcv, "ld.global.cv.v4.s32", int4, int4, "=r", : "memory");
+__INTRINSIC_LOAD2(__ldcv, "ld.global.cv.v2.s64", longlong2, longlong2,
+                  "=l", : "memory");
+
+__INTRINSIC_LOAD(__ldcv, "ld.global.cv.f32", float, float, "=f", : "memory");
+__INTRINSIC_LOAD(__ldcv, "ld.global.cv.f64", double, double, "=d", : "memory");
+
+__INTRINSIC_LOAD2(__ldcv, "ld.global.cv.v2.f32", float2, float2,
+                  "=f", : "memory");
+__INTRINSIC_LOAD4(__ldcv, "ld.global.cv.v4.f32", float4, float4,
+                  "=f", : "memory");
+__INTRINSIC_LOAD2(__ldcv, "ld.global.cv.v2.f64", double2, double2,
+                  "=d", : "memory");
+
+inline __device__ long __ldcv(const long *ptr) {
+  unsigned long ret;
+  if (sizeof(long) == 8) {
+    asm("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : "l"(ptr));
+  } else {
+    asm("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : "l"(ptr));
+  }
+  return (long)ret;
+}
+
+__INTRINSIC_LOAD(__ldcs, "ld.global.cs.s8", char, unsigned int, "=r", );
+__INTRINSIC_LOAD(__ldcs, "ld.global.cs.s8", signed char, signed int, "=r", );
+__INTRINSIC_LOAD(__ldcs, "ld.global.cs.s16", short, unsigned short, "=h", );
+__INTRINSIC_LOAD(__ldcs, "ld.global.cs.s32", int, unsigned int, "=r", );
+__INTRINSIC_LOAD(__ldcs, "ld.global.cs.s64", long long, unsigned long long,
+                 "=l", );
+
+__INTRINSIC_LOAD2(__ldcs, "ld.global.cs.v2.s8", char2, int2, "=r", );
+__INTRINSIC_LOAD4(__ldcs, "ld.global.cs.v4.s8", char4, int4, "=r", );
+__INTRINSIC_LOAD2(__ldcs, "ld.global.cs.v2.s16", short2, short2, "=h", );
+__INTRINSIC_LOAD4(__ldcs, "ld.global.cs.v4.s16", short4, short4, "=h", );
+__INTRINSIC_LOAD2(__ldcs, "ld.global.cs.v2.s32", int2, int2, "=r", );
+__INTRINSIC_LOAD4(__ldcs, "ld.global.cs.v4.s32", int4, int4, "=r", );
+__INTRINSIC_LOAD2(__ldcs, "ld.global.cs.v2.s64", longlong2, longlong2, "=l", );
+
+__INTRINSIC_LOAD(__ldcs, "ld.global.cs.u8", unsigned char, unsigned int,
+                 "=r", );
+__INTRINSIC_LOAD(__ldcs, "ld.global.cs.u16", unsigned short, unsigned short,
+                 "=h", );
+__INTRINSIC_LOAD(__ldcs, "ld.global.cs.u32", unsigned int, unsigned int,
+                 "=r", );
+__INTRINSIC_LOAD(__ldcs, "ld.global.cs.u64", unsigned long long,
+                 unsigned long long, "=l", );
+
+__INTRINSIC_LOAD2(__ldcs, "ld.global.cs.v2.u8", uchar2, uint2, "=r", );
+__INTRINSIC_LOAD4(__ldcs, "ld.global.cs.v4.u8", uchar4, uint4, "=r", );
+__INTRINSIC_LOAD2(__ldcs, "ld.global.cs.v2.u16", ushort2, ushort2, "=h", );
+__INTRINSIC_LOAD4(__ldcs, "ld.global.cs.v4.u16", ushort4, ushort4, "=h", );
+__INTRINSIC_LOAD2(__ldcs, "ld.global.cs.v2.u32", uint2, uint2, "=r", );
+__INTRINSIC_LOAD4(__ldcs, "ld.global.cs.v4.u32", uint4, uint4, "=r", );
+__INTRINSIC_LOAD2(__ldcs, "ld.global.cs.v2.u64", ulonglong2, ulonglong2,
+                  "=l", );
+
+__INTRINSIC_LOAD(__ldcs, "ld.global.cs.f32", float, float, "=f", );
+__INTRINSIC_LOAD(__ldcs, "ld.global.cs.f64", double, double, "=d", );
+__INTRINSIC_LOAD2(__ldcs, "ld.global.cs.v2.f32", float2, float2, "=f", );
+__INTRINSIC_LOAD4(__ldcs, "ld.global.cs.v4.f32", float4, float4, "=f", );
+__INTRINSIC_LOAD2(__ldcs, "ld.global.cs.v2.f64", double2, double2, "=d", );
+
+#pragma pop_macro("__INTRINSIC_LOAD")
+#pragma pop_macro("__INTRINSIC_LOAD2")
+#pragma pop_macro("__INTRINSIC_LOAD4")
+
+inline __device__ long __ldcs(const long *ptr) {
+  unsigned long ret;
+  if (sizeof(long) == 8) {
+    asm("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : "l"(ptr));
+  } else {
+    asm("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : "l"(ptr));
+  }
+  return (long)ret;
+}
+
+#pragma push_macro("__INTRINSIC_STORE")
+#define __INTRINSIC_STORE(__FnName, __AsmOp, __DeclType, __TmpType, __AsmType) \
+  inline __device__ void __FnName(__DeclType *ptr, __DeclType value) {         \
+    __TmpType tmp = (__TmpType)value;                                          \
+    asm(__AsmOp " [%0], %1;" ::"l"(ptr), __AsmType(tmp) : "memory");           \
+  }
+
+#pragma push_macro("__INTRINSIC_STORE2")
+#define __INTRINSIC_STORE2(__FnName, __AsmOp, __DeclType, __TmpType,   \
+                           __AsmType)                                  \
+  inline __device__ void __FnName(__DeclType *ptr, __DeclType value) { \
+    __TmpType tmp;                                                     \
+    using __ElementType = decltype(tmp.x);                             \
+    tmp.x = (__ElementType)(value.x);                                  \
+    tmp.y = (__ElementType)(value.y);                                  \
+    asm(__AsmOp " [%0], {%1,%2};" ::"l"(ptr), __AsmType(tmp.x),        \
+        __AsmType(tmp.y)                                               \
+        : "memory");                                                   \
+  }
+
+#pragma push_macro("__INTRINSIC_STORE4")
+#define __INTRINSIC_STORE4(__FnName, __AsmOp, __DeclType, __TmpType,   \
+                           __AsmType)                                  \
+  inline __device__ void __FnName(__DeclType *ptr, __DeclType value) { \
+    __TmpType tmp;                                                     \
+    using __ElementType = decltype(tmp.x);                             \
+    tmp.x = (__ElementType)(value.x);                                  \
+    tmp.y = (__ElementType)(value.y);                                  \
+    tmp.z = (__ElementType)(value.z);                                  \
+    tmp.w = (__ElementType)(value.w);                                  \
+    asm(__AsmOp " [%0], {%1,%2,%3,%4};" ::"l"(ptr), __AsmType(tmp.x),  \
+        __AsmType(tmp.y), __AsmType(tmp.z), __AsmType(tmp.w)           \
+        : "memory");                                                   \
+  }
+
+__INTRINSIC_STORE(__stwt, "st.global.wt.s8", char, int, "r");
+__INTRINSIC_STORE(__stwt, "st.global.wt.s8", signed char, int, "r");
+__INTRINSIC_STORE(__stwt, "st.global.wt.s16", short, short, "h");
+__INTRINSIC_STORE(__stwt, "st.global.wt.s32", int, int, "r");
+__INTRINSIC_STORE(__stwt, "st.global.wt.s64", long long, long long, "l");
+
+__INTRINSIC_STORE2(__stwt, "st.global.wt.v2.s8", char2, int2, "r");
+__INTRINSIC_STORE4(__stwt, "st.global.wt.v4.s8", char4, int4, "r");
+__INTRINSIC_STORE2(__stwt, "st.global.wt.v2.s16", short2, short2, "h");
+__INTRINSIC_STORE4(__stwt, "st.global.wt.v4.s16", short4, short4, "h");
+__INTRINSIC_STORE2(__stwt, "st.global.wt.v2.s32", int2, int2, "r");
+__INTRINSIC_STORE4(__stwt, "st.global.wt.v4.s32", int4, int4, "r");
+__INTRINSIC_STORE2(__stwt, "st.global.wt.v2.s64", longlong2, longlong2, "l");
+
+__INTRINSIC_STORE(__stwt, "st.global.wt.u8", unsigned char, int, "r");
+__INTRINSIC_STORE(__stwt, "st.global.wt.u16", unsigned short, unsigned short,
+                  "h");
+__INTRINSIC_STORE(__stwt, "st.global.wt.u32", unsigned int, unsigned int, "r");
+__INTRINSIC_STORE(__stwt, "st.global.wt.u64", unsigned long long,
+                  unsigned long long, "l");
+
+__INTRINSIC_STORE2(__stwt, "st.global.wt.v2.u8", uchar2, uchar2, "r");
+__INTRINSIC_STORE4(__stwt, "st.global.wt.v4.u8", uchar4, uint4, "r");
+__INTRINSIC_STORE2(__stwt, "st.global.wt.v2.u16", ushort2, ushort2, "h");
+__INTRINSIC_STORE4(__stwt, "st.global.wt.v4.u16", ushort4, ushort4, "h");
+__INTRINSIC_STORE2(__stwt, "st.global.wt.v2.u32", uint2, uint2, "r");
+__INTRINSIC_STORE4(__stwt, "st.global.wt.v4.u32", uint4, uint4, "r");
+__INTRINSIC_STORE2(__stwt, "st.global.wt.v2.u64", ulonglong2, ulonglong2, "l");
+
+__INTRINSIC_STORE(__stwt, "st.global.wt.f32", float, float, "f");
+__INTRINSIC_STORE(__stwt, "st.global.wt.f64", double, double, "d");
+__INTRINSIC_STORE2(__stwt, "st.global.wt.v2.f32", float2, float2, "f");
+__INTRINSIC_STORE4(__stwt, "st.global.wt.v4.f32", float4, float4, "f");
+__INTRINSIC_STORE2(__stwt, "st.global.wt.v2.f64", double2, double2, "d");
+
+#pragma pop_macro("__INTRINSIC_STORE")
+#pragma pop_macro("__INTRINSIC_STORE2")
+#pragma pop_macro("__INTRINSIC_STORE4")
+
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
 
 #if CUDA_VERSION >= 11000
 extern "C" {
