linjamaki created this revision.
Herald added subscribers: Anastasia, yaxunl.
linjamaki updated this revision to Diff 372671.
linjamaki added a comment.
Herald added subscribers: dexonsmith, hiraditya.
linjamaki updated this revision to Diff 373494.
linjamaki edited the summary of this revision.
linjamaki published this revision for review.
linjamaki added reviewers: Anastasia, bader.
Herald added a project: clang.
Herald added a subscriber: cfe-commits.

Rebase.




This patch translates HIP kernels to SPIR-V kernels when the HIP
compilation mode targets SPIR-V. This involves:

- Setting the CUDA calling convention to CC_OpenCLKernel (which maps to
SPIR_KERNEL in LLVM IR later on).

- Coercing kernel pointer arguments with the default address space (AS)
qualifier to the CrossWorkGroup AS (__global in OpenCL). HIPSPV's device
code is ultimately SPIR-V for the OpenCL execution environment (as the
starter/default), where neither Generic nor Function (OpenCL's private) is
a supported storage class for kernel pointer types. This leaves
CrossWorkGroup as the only reasonable choice for HIP buffers. A sketch of
the expected lowering follows this list.
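
For illustration, a minimal sketch of the lowering described above. The
kernel and its names (scale, buf, factor) are hypothetical, and the IR
signatures in the comments show what the patch is expected to produce,
not verbatim compiler output:

  // Assuming __global__ expands as in the test below:
  #define __global__ __attribute__((global))

  // Hypothetical HIP kernel (device compilation, -triple spirv64):
  __global__ void scale(float *buf, float factor) { *buf *= factor; }

  // Before this patch, the pointer parameter keeps the default address
  // space and the function lacks the kernel calling convention:
  //   define void @_Z5scalePff(float* %buf, float %factor)
  //
  // After this patch, the kernel gets the SPIR_KERNEL calling convention
  // and the pointer is coerced to CrossWorkGroup (addrspace(1)):
  //   define spir_kernel void @_Z5scalePff(float addrspace(1)* %buf, float %factor)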


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D109818

Files:
  clang/lib/CodeGen/TargetInfo.cpp
  clang/test/CodeGenHIP/hipspv-kernel.cpp

Index: clang/test/CodeGenHIP/hipspv-kernel.cpp
===================================================================
--- /dev/null
+++ clang/test/CodeGenHIP/hipspv-kernel.cpp
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 -triple spirv64 -x hip -emit-llvm -fcuda-is-device \
+// RUN:   -o - %s | FileCheck %s
+
+#define __global__ __attribute__((global))
+
+// CHECK: define {{.*}}spir_kernel void @_Z3fooPff(float addrspace(1)* {{.*}}, float {{.*}})
+__global__ void foo(float *a, float b) {
+  *a = b;
+}
Index: clang/lib/CodeGen/TargetInfo.cpp
===================================================================
--- clang/lib/CodeGen/TargetInfo.cpp
+++ clang/lib/CodeGen/TargetInfo.cpp
@@ -10189,8 +10189,11 @@
 public:
   SPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }
 
+  void computeInfo(CGFunctionInfo &FI) const override;
+
 private:
   void setCCs();
+  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
 };
 } // end anonymous namespace
 namespace {
@@ -10205,6 +10208,7 @@
   }
 
   unsigned getOpenCLKernelCallingConv() const override;
+  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
 };
 
 } // End anonymous namespace.
@@ -10213,10 +10217,44 @@
   RuntimeCC = llvm::CallingConv::SPIR_FUNC;
 }
 
+ABIArgInfo SPIRABIInfo::classifyKernelArgumentType(QualType Ty) const {
+  if (getContext().getLangOpts().HIP && getTarget().getTriple().isSPIRV()) {
+    // Coerce pointer arguments with default address space to CrossWorkGroup
+    // pointers for HIPSPV. When the language mode is HIP, the SPIRTargetInfo
+    // maps cuda_device to SPIR-V's CrossWorkGroup address space.
+    llvm::Type *LTy = CGT.ConvertType(Ty);
+    auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
+    auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
+    if (LTy->isPointerTy() && LTy->getPointerAddressSpace() == DefaultAS) {
+      LTy = llvm::PointerType::get(
+          cast<llvm::PointerType>(LTy)->getElementType(), GlobalAS);
+      return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
+    }
+  }
+  return classifyArgumentType(Ty);
+}
+
+void SPIRABIInfo::computeInfo(CGFunctionInfo &FI) const {
+  // The logic is the same as in DefaultABIInfo, except for the handling
+  // of kernel arguments.
+  llvm::CallingConv::ID CC = FI.getCallingConvention();
+
+  if (!getCXXABI().classifyReturnType(FI))
+    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+  for (auto &I : FI.arguments()) {
+    if (CC == llvm::CallingConv::SPIR_KERNEL) {
+      I.info = classifyKernelArgumentType(I.type);
+    } else {
+      I.info = classifyArgumentType(I.type);
+    }
+  }
+}
+
 namespace clang {
 namespace CodeGen {
 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
-  DefaultABIInfo SPIRABI(CGM.getTypes());
+  SPIRABIInfo SPIRABI(CGM.getTypes());
   SPIRABI.computeInfo(FI);
 }
 }
@@ -10226,6 +10264,18 @@
   return llvm::CallingConv::SPIR_KERNEL;
 }
 
+void SPIRTargetCodeGenInfo::setCUDAKernelCallingConvention(
+    const FunctionType *&FT) const {
+  // Convert HIP kernels to SPIR-V kernels.
+  if (getABIInfo().getContext().getLangOpts().HIP &&
+      getABIInfo().getTarget().getTriple().isSPIRV()) {
+    FT = getABIInfo().getContext().adjustFunctionType(
+        FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
+    return;
+  }
+  TargetCodeGenInfo::setCUDAKernelCallingConvention(FT);
+}
+
 static bool appendType(SmallStringEnc &Enc, QualType QType,
                        const CodeGen::CodeGenModule &CGM,
                        TypeStringCache &TSC);
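
To try the new test by hand, the cc1 invocation from its RUN line can be
used directly. This is a sketch: %clang_cc1 and %s are lit substitutions
that expand roughly as follows:

  clang -cc1 -triple spirv64 -x hip -emit-llvm -fcuda-is-device \
    -o - clang/test/CodeGenHIP/hipspv-kernel.cpp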