================
@@ -2949,15 +2950,34 @@ static void handleSectionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
   }
 }
 
+static bool isValidCodeModelAttr(Sema &S, StringRef Str) {
+  if (S.Context.getTargetInfo().getTriple().isLoongArch()) {
+    return Str == "normal" || Str == "medium" || Str == "extreme";
+  } else {
+    assert(S.Context.getTargetInfo().getTriple().getArch() ==
+               llvm::Triple::x86_64 &&
+           "only loongarch/x86-64 supported");
+    return Str == "small" || Str == "large";
+  }
+}
+
 static void handleCodeModelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
   StringRef Str;
   SourceLocation LiteralLoc;
   // Check that it is a string.
   if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &LiteralLoc))
     return;
 
+  // Ignore the attribute for GPU device compiles since it only applies to host
+  // globals.
+  if (S.Context.getTargetInfo().getTriple().isNVPTX() ||
----------------
rnk wrote:

Aren't we? I mean, I'm sure nvidia and vendors everywhere do crazy stuff, but I
would like to clarify what the support scope of the Clang project is. My
understanding of all the CUDA functionality we've ever added to Clang is that
it assumes we're using the Clang compiler on both sides, not the janky nvcc
split preprocessing model.

https://github.com/llvm/llvm-project/pull/124834
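
For readers following the thread, the handler quoted above parses a GCC-style
per-variable code model attribute. A minimal sketch of how it might appear in
user code, assuming the "model" attribute spelling (which is not shown in the
quoted hunk) and the string values accepted by isValidCodeModelAttr:

    /* x86-64: allow this global to live outside the +/-2GB range assumed by
       the default small code model. */
    int huge_table[1 << 20] __attribute__((model("large")));

    /* LoongArch: request the extreme code model for this variable. */
    int far_flag __attribute__((model("extreme")));

With the change under review, a CUDA/HIP device (NVPTX) compile would ignore
the attribute, since the code model only applies to host globals.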
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
