https://github.com/krzysz00 updated https://github.com/llvm/llvm-project/pull/126828

>From f125444bb53e1e10b40b352e9cf7fd3ad052bfbf Mon Sep 17 00:00:00 2001
From: Krzysztof Drewniak <krzysztof.drewn...@amd.com>
Date: Tue, 11 Feb 2025 23:55:36 +0000
Subject: [PATCH 1/2] [AMDGPU] Generalize amdgcn.make.buffer.rsrc to fat
 pointers

Squashed commit; the final commit message will be taken from the GitHub PR description.
---
 clang/lib/CodeGen/CGBuiltin.cpp               | 16 ++++-
 .../CodeGenHIP/builtins-make-buffer-rsrc.hip  |  8 +--
 .../builtins-amdgcn-make-buffer-rsrc.cl       | 20 +++----
 llvm/docs/AMDGPUUsage.rst                     | 13 ++++-
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      | 15 ++++-
 llvm/lib/IR/AutoUpgrade.cpp                   |  1 +
 .../AMDGPU/AMDGPULowerBufferFatPointers.cpp   | 20 +++++++
 .../llvm.amdgcn.make.buffer.rsrc.ll           | 58 ++++++++++++++++---
 .../AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll    | 58 ++++++++++++++++---
 .../lower-buffer-fat-pointers-pointer-ops.ll  | 14 +++++
 .../AMDGPU/make-buffer-rsrc-lds-fails.ll      |  4 +-
 .../AMDGPU/ptr-buffer-alias-scheduling.ll     |  6 +-
 .../FunctionAttrs/make-buffer-rsrc.ll         | 14 ++---
 .../LICM/AMDGPU/buffer-rsrc-ptrs.ll           | 12 ++--
 mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td  |  8 +--
 mlir/test/Target/LLVMIR/rocdl.mlir            | 13 ++++-
 16 files changed, 220 insertions(+), 60 deletions(-)
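
As a quick orientation (an illustrative LLVM IR sketch, not part of the patch): the
intrinsic's result type becomes overloaded, so the mangled name now encodes the result
address space in addition to the base address space, and the intrinsic can return buffer
fat pointers directly. The call forms below follow the updated tests in this patch:

  ; Previously, the result was fixed to ptr addrspace(8):
  %rsrc.old = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 0, i32 1234, i32 5678)
  ; Now the result type is mangled in as well, and fat-pointer results are allowed:
  %rsrc.new = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 0, i32 1234, i32 5678)
  %fat.ptr  = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %p, i16 0, i32 1234, i32 5678)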

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index d57f491a20c8e..348cb523b1718 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -20723,9 +20723,19 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned 
BuiltinID,
   case AMDGPU::BI__builtin_amdgcn_bitop3_b16:
     return emitBuiltinWithOneOverloadedType<4>(*this, E,
                                                Intrinsic::amdgcn_bitop3);
-  case AMDGPU::BI__builtin_amdgcn_make_buffer_rsrc:
-    return emitBuiltinWithOneOverloadedType<4>(
-        *this, E, Intrinsic::amdgcn_make_buffer_rsrc);
+  case AMDGPU::BI__builtin_amdgcn_make_buffer_rsrc: {
+    // TODO: LLVM has this overloaded to allow for fat pointers, but since
+    // those haven't been plumbed through to Clang yet, default to creating the
+    // resource type.
+    SmallVector<Value *, 4> Args;
+    for (unsigned I = 0; I < 4; ++I)
+      Args.push_back(EmitScalarExpr(E->getArg(I)));
+    llvm::PointerType *RetTy = llvm::PointerType::get(
+        Builder.getContext(), llvm::AMDGPUAS::BUFFER_RESOURCE);
+    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_make_buffer_rsrc,
+                                   {RetTy, Args[0]->getType()});
+    return Builder.CreateCall(F, Args);
+  }
   case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b8:
   case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b16:
   case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b32:
diff --git a/clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip 
b/clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip
index c1a30633f3d0a..2342fcefb5f89 100644
--- a/clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip
+++ b/clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip
@@ -25,7 +25,7 @@
 // CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4
-// CHECK-NEXT:    [[TMP4:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], i32 
[[TMP3]])
+// CHECK-NEXT:    [[TMP4:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], 
i32 [[TMP3]])
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP4]]
 //
 __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void *p, 
short stride, int num, int flags) {
@@ -49,7 +49,7 @@ __device__ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p0(void *p, short
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr [[TMP0]], i16 4, i32 [[TMP1]], i32 
[[TMP2]])
+// CHECK-NEXT:    [[TMP3:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 4, i32 [[TMP1]], i32 
[[TMP2]])
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP3]]
 //
 __device__ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p0_stride_constant(void *p, int num, int flags) {
@@ -73,7 +73,7 @@ __device__ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p0_stride_constan
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr [[TMP0]], i16 [[TMP1]], i32 1234, i32 
[[TMP2]])
+// CHECK-NEXT:    [[TMP3:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 1234, i32 
[[TMP2]])
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP3]]
 //
 __device__ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p0_num_constant(void *p, short stride, int flags) {
@@ -97,7 +97,7 @@ __device__ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p0_num_constant(v
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2
 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4
-// CHECK-NEXT:    [[TMP3:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], i32 
5678)
+// CHECK-NEXT:    [[TMP3:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], 
i32 5678)
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP3]]
 //
 __device__ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p0_flags_constant(void *p, short stride, int num) {
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl 
b/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl
index 2c7bc10fb609c..29093c09c39d0 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl
@@ -4,7 +4,7 @@
 
 // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p0(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 
[[NUM:%.*]], i32 [[FLAGS:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 
[[NUM:%.*]], i32 [[FLAGS:%.*]])
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP0]]
 //
 __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void *p, short stride, 
int num, int flags) {
@@ -13,7 +13,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void 
*p, short stride, in
 
 // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p0_stride_constant(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr [[P:%.*]], i16 4, i32 [[NUM:%.*]], i32 
[[FLAGS:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 4, i32 [[NUM:%.*]], i32 
[[FLAGS:%.*]])
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP0]]
 //
 __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_stride_constant(void 
*p, int num, int flags) {
@@ -22,7 +22,7 @@ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p0_stride_constant(void *p,
 
 // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p0_num_constant(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 1234, 
i32 [[FLAGS:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 
1234, i32 [[FLAGS:%.*]])
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP0]]
 //
 __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_num_constant(void *p, 
short stride, int flags) {
@@ -31,7 +31,7 @@ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p0_num_constant(void *p, sho
 
 // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p0_flags_constant(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 
[[NUM:%.*]], i32 5678)
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 
[[NUM:%.*]], i32 5678)
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP0]]
 //
 __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_flags_constant(void *p, 
short stride, int num) {
@@ -40,7 +40,7 @@ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p0_flags_constant(void *p, s
 
 // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p1(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) [[P:%.*]], i16 
[[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 
[[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]])
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP0]]
 //
 __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1(global void *p, short 
stride, int num, int flags) {
@@ -49,7 +49,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1(global 
void *p, short str
 
 // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p1_stride_constant(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) [[P:%.*]], i16 4, i32 
[[NUM:%.*]], i32 [[FLAGS:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 4, i32 
[[NUM:%.*]], i32 [[FLAGS:%.*]])
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP0]]
 //
 __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_stride_constant(global 
void *p, int num, int flags) {
@@ -58,7 +58,7 @@ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p1_stride_constant(global vo
 
 // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p1_num_constant(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) [[P:%.*]], i16 
[[STRIDE:%.*]], i32 1234, i32 [[FLAGS:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 
[[STRIDE:%.*]], i32 1234, i32 [[FLAGS:%.*]])
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP0]]
 //
 __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_num_constant(global 
void *p, short stride, int flags) {
@@ -67,7 +67,7 @@ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p1_num_constant(global void
 
 // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p1_flags_constant(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) [[P:%.*]], i16 
[[STRIDE:%.*]], i32 [[NUM:%.*]], i32 5678)
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 
[[STRIDE:%.*]], i32 [[NUM:%.*]], i32 5678)
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP0]]
 //
 __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_flags_constant(global 
void *p, short stride, int num) {
@@ -76,7 +76,7 @@ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p1_flags_constant(global voi
 
 // CHECK-LABEL: @test_amdgcn_make_buffer_p0_nullptr(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr null, i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], 
i32 [[FLAGS:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr null, i16 [[STRIDE:%.*]], i32 
[[NUM:%.*]], i32 [[FLAGS:%.*]])
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP0]]
 //
 __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_p0_nullptr(short stride, int 
num, int flags) {
@@ -85,7 +85,7 @@ __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_p0_nullptr(short stride, int num,
 
 // CHECK-LABEL: @test_amdgcn_make_buffer_p1_nullptr(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) null, i16 [[STRIDE:%.*]], i32 
[[NUM:%.*]], i32 [[FLAGS:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) null, i16 [[STRIDE:%.*]], 
i32 [[NUM:%.*]], i32 [[FLAGS:%.*]])
 // CHECK-NEXT:    ret ptr addrspace(8) [[TMP0]]
 //
 __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_p1_nullptr(short stride, int 
num, int flags) {
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 899b2cf3b4901..5966d1617feee 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -990,7 +990,12 @@ supported for the ``amdgcn`` target.
   the stride must be 0, the "add tid" flag must be 0, the swizzle enable bits
   must be off, and the extent must be measured in bytes. (On subtargets where
   bounds checking may be disabled, buffer fat pointers may choose to enable
-  it or not).
+  it or not). The cache swizzle support introduced in gfx942 may be used.
+
+  These pointers can be created by an `addrspacecast` from a buffer resource
+  (`ptr addrspace(8)`) or by calling `llvm.amdgcn.make.buffer.rsrc` to produce a
+  `ptr addrspace(7)` directly. The latter yields a buffer fat pointer with an
+  initial offset of 0 and prevents the address space cast from being rewritten
+  away.
 
 **Buffer Resource**
   The buffer resource pointer, in address space 8, is the newer form
@@ -1027,6 +1032,12 @@ supported for the ``amdgcn`` target.
   the stride is the size of a structured element, the "add tid" flag must be 0,
   and the swizzle enable bits must be off.
 
+  These pointers can be created by an `addrspacecast` from a buffer resource
+  (`ptr addrspace(8)`) or by calling `llvm.amdgcn.make.buffer.rsrc` to produce a
+  `ptr addrspace(9)` directly. The latter yields a buffer strided pointer whose
+  initial index and offset are both 0, and it prevents the address space cast
+  from being rewritten away.
+
 **Streamout Registers**
   Dedicated registers used by the GS NGG Streamout Instructions. The register
   file is modelled as a memory in a distinct address space because it is 
indexed
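
As a minimal sketch of the two creation paths described in the documentation above
(not part of the patch; the intrinsic name follows the tests in this patch), both ways
of obtaining a buffer fat pointer look like:

  declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr readnone, i16, i32, i32)

  define float @fat_pointer_examples(ptr addrspace(8) %rsrc, ptr %base, i32 %extent) {
    ; (1) Cast an existing buffer resource to a fat pointer.
    %fat.cast = addrspacecast ptr addrspace(8) %rsrc to ptr addrspace(7)
    %a = load float, ptr addrspace(7) %fat.cast
    ; (2) Create the fat pointer directly from the intrinsic; the initial offset is 0,
    ; and, per the docs above, no addrspacecast is left around to be rewritten away.
    %fat.direct = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %base, i16 0, i32 %extent, i32 0)
    %b = load float, ptr addrspace(7) %fat.direct
    %sum = fadd float %a, %b
    ret float %sum
  }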
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td 
b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index d5d185ebc12f6..9558f2b9b74e0 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1284,11 +1284,24 @@ defset list<AMDGPUImageDimIntrinsic> 
AMDGPUImageDimAtomicIntrinsics = {
 // Data type for buffer resources (V#). Maybe, in the future, we can create a
 // similar one for textures (T#).
 def AMDGPUBufferRsrcTy : LLVMQualPointerType<8>;
+// Data type for buffer fat pointers, which are a buffer resource (V#) followed by
+// a 32-bit offset. These don't exist in hardware and are a compiler-internal
+// convenience.
+def AMDGPUBufferFatPointerTy : LLVMQualPointerType<7>;
 
 let TargetPrefix = "amdgcn" in {
 
+// Create a buffer resource wrapping `base` with the specified `stride`,
+// `numrecords`, and `flags`. All of these values will need to be
+// wave-uniform when the buffer instructions are invoked, so non-uniform
+// inputs to this intrinsic will trigger waterfall loops.
+//
+// In addition to creating ptr addrspace(8), the representation of buffer
+// resources, it can create the fat pointers ptr addrspace(7) and
+// ptr addrspace(9), which carry additional offset bits. When this intrinsic
+// is used to create these fat pointers, their offset and index fields (if
+// applicable) are zero.
 def int_amdgcn_make_buffer_rsrc : DefaultAttrsIntrinsic <
-  [AMDGPUBufferRsrcTy],
+  [llvm_anyptr_ty],
   [llvm_anyptr_ty, // base
    llvm_i16_ty,    // stride (and swizzle control)
    llvm_i32_ty,    // NumRecords / extent
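
Because both the result and the base pointer are now overloaded, the mangled suffix
carries the result address space first and the base address space second. The concrete
declarations exercised by the tests in this patch are shown below (the ptr addrspace(9)
strided form would follow the same pattern, though no test here exercises it):

  declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr readnone, i16, i32, i32)
  declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) readnone, i16, i32, i32)
  declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr readnone, i16, i32, i32)
  declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) readnone, i16, i32, i32)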
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 57072715366c9..48c4c81d324a9 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -30,6 +30,7 @@
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/IntrinsicsAArch64.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/IR/IntrinsicsARM.h"
 #include "llvm/IR/IntrinsicsNVPTX.h"
 #include "llvm/IR/IntrinsicsRISCV.h"
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp 
b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
index b0b6c4df8e982..86b2c4f78fc3e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
@@ -2078,6 +2078,7 @@ static bool isRemovablePointerIntrinsic(Intrinsic::ID 
IID) {
   switch (IID) {
   default:
     return false;
+  case Intrinsic::amdgcn_make_buffer_rsrc:
   case Intrinsic::ptrmask:
   case Intrinsic::invariant_start:
   case Intrinsic::invariant_end:
@@ -2092,6 +2093,25 @@ PtrParts 
SplitPtrStructs::visitIntrinsicInst(IntrinsicInst &I) {
   switch (IID) {
   default:
     break;
+  case Intrinsic::amdgcn_make_buffer_rsrc: {
+    if (!isSplitFatPtr(I.getType()))
+      return {nullptr, nullptr};
+    Value *Base = I.getArgOperand(0);
+    Value *Stride = I.getArgOperand(1);
+    Value *NumRecords = I.getArgOperand(2);
+    Value *Flags = I.getArgOperand(3);
+    auto *SplitType = cast<StructType>(I.getType());
+    Type *RsrcType = SplitType->getElementType(0);
+    Type *OffType = SplitType->getElementType(1);
+    IRB.SetInsertPoint(&I);
+    Value *Rsrc = IRB.CreateIntrinsic(IID, {RsrcType, Base->getType()},
+                                      {Base, Stride, NumRecords, Flags});
+    copyMetadata(Rsrc, &I);
+    Rsrc->takeName(&I);
+    Value *Zero = Constant::getNullValue(OffType);
+    SplitUsers.insert(&I);
+    return {Rsrc, Zero};
+  }
   case Intrinsic::ptrmask: {
     Value *Ptr = I.getArgOperand(0);
     if (!isSplitFatPtr(Ptr->getType()))
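
In IR terms, the new case above splits a fat-pointer make.buffer.rsrc call into its
resource and offset parts. A hedged before/after sketch (the exact shape is what the
lower-buffer-fat-pointers-pointer-ops.ll test further down checks):

  ; Before the pass:
  %ret = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %buf, i16 %stride, i32 %numRecords, i32 %flags)
  ; After the pass, the fat pointer is represented as the pair { ptr addrspace(8), i32 },
  ; with the resource from the .p8 intrinsic and the offset part set to 0:
  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %buf, i16 %stride, i32 %numRecords, i32 %flags)
  %pair.0 = insertvalue { ptr addrspace(8), i32 } poison, ptr addrspace(8) %rsrc, 0
  %pair = insertvalue { ptr addrspace(8), i32 } %pair.0, i32 0, 1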
diff --git 
a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.make.buffer.rsrc.ll 
b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.make.buffer.rsrc.ll
index 4a151aeca87e4..6171c73d8d2dc 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.make.buffer.rsrc.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.make.buffer.rsrc.ll
@@ -25,7 +25,7 @@ define amdgpu_ps ptr addrspace(8) @basic_raw_buffer(ptr inreg 
%p) {
   ; CHECK-NEXT:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32 = 
V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
   ; CHECK-NEXT:   $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, 
implicit $sgpr2, implicit $sgpr3
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
0, i32 1234, i32 5678)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 0, i32 1234, i32 5678)
   ret ptr addrspace(8) %rsrc
 }
 
@@ -43,7 +43,7 @@ define amdgpu_ps float @read_raw_buffer(ptr addrspace(1) 
inreg %p) {
   ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = 
BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit 
$exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8)
   ; CHECK-NEXT:   $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr 
addrspace(1) %p, i16 0, i32 0, i32 0)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr 
addrspace(1) %p, i16 0, i32 0, i32 0)
   %loaded = call float @llvm.amdgcn.raw.ptr.buffer.load(ptr addrspace(8) 
%rsrc, i32 4, i32 0, i32 0)
   ret float %loaded
 }
@@ -74,7 +74,7 @@ define amdgpu_ps ptr addrspace(8) @basic_struct_buffer(ptr 
inreg %p) {
   ; CHECK-NEXT:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32 = 
V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
   ; CHECK-NEXT:   $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, 
implicit $sgpr2, implicit $sgpr3
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
4, i32 1234, i32 5678)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 4, i32 1234, i32 5678)
   ret ptr addrspace(8) %rsrc
 }
 
@@ -104,7 +104,7 @@ define amdgpu_ps ptr addrspace(8) @variable_top_half(ptr 
inreg %p, i32 inreg %nu
   ; CHECK-NEXT:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32 = 
V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
   ; CHECK-NEXT:   $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, 
implicit $sgpr2, implicit $sgpr3
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
4, i32 %numVals, i32 %flags)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 4, i32 %numVals, i32 %flags)
   ret ptr addrspace(8) %rsrc
 }
 
@@ -136,7 +136,7 @@ define amdgpu_ps ptr addrspace(8) @general_case(ptr inreg 
%p, i16 inreg %stride,
   ; CHECK-NEXT:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32 = 
V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
   ; CHECK-NEXT:   $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, 
implicit $sgpr2, implicit $sgpr3
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
%stride, i32 %numVals, i32 %flags)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 %stride, i32 %numVals, i32 %flags)
   ret ptr addrspace(8) %rsrc
 }
 
@@ -161,7 +161,7 @@ define amdgpu_ps float @general_case_load(ptr inreg %p, i16 
inreg %stride, i32 i
   ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_IDXEN:%[0-9]+]]:vgpr_32 = 
BUFFER_LOAD_DWORD_IDXEN [[COPY5]], [[REG_SEQUENCE]], [[S_MOV_B32_2]], 0, 0, 0, 
implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 
8)
   ; CHECK-NEXT:   $vgpr0 = COPY [[BUFFER_LOAD_DWORD_IDXEN]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
%stride, i32 %numVals, i32 %flags)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 %stride, i32 %numVals, i32 %flags)
   %value = call float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) 
%rsrc, i32 0, i32 0, i32 0, i32 0)
   ret float %value
 }
@@ -221,12 +221,52 @@ define amdgpu_ps float 
@general_case_load_with_waterfall(ptr %p, i16 %stride, i3
   ; CHECK-NEXT: bb.5:
   ; CHECK-NEXT:   $vgpr0 = COPY [[BUFFER_LOAD_DWORD_IDXEN]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
%stride, i32 %numVals, i32 %flags)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 %stride, i32 %numVals, i32 %flags)
   %value = call float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) 
%rsrc, i32 0, i32 0, i32 0, i32 0)
   ret float %value
 }
 
-declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr nocapture 
readnone, i16, i32, i32)
-declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) 
nocapture readnone, i16, i32, i32)
+define amdgpu_ps float @read_buffer_fat_ptr_p0(ptr inreg %p) {
+  ; CHECK-LABEL: name: read_buffer_fat_ptr_p0
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $sgpr0, $sgpr1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+  ; CHECK-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; CHECK-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 65535
+  ; CHECK-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], 
[[S_MOV_B32_1]], implicit-def dead $scc
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], 
%subreg.sub0, [[S_AND_B32_]], %subreg.sub1, [[S_MOV_B32_]], %subreg.sub2, 
[[S_MOV_B32_]], %subreg.sub3
+  ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = 
BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit 
$exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8)
+  ; CHECK-NEXT:   $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]]
+  ; CHECK-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
+  %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %p, i16 
0, i32 0, i32 0)
+  %loaded = load float, ptr addrspace(7) %ptr
+  ret float %loaded
+}
+
+define amdgpu_ps float @read_buffer_fat_ptr_p1(ptr addrspace(1) inreg %p) {
+  ; CHECK-LABEL: name: read_buffer_fat_ptr_p1
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $sgpr0, $sgpr1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+  ; CHECK-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; CHECK-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 65535
+  ; CHECK-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], 
[[S_MOV_B32_1]], implicit-def dead $scc
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], 
%subreg.sub0, [[S_AND_B32_]], %subreg.sub1, [[S_MOV_B32_]], %subreg.sub2, 
[[S_MOV_B32_]], %subreg.sub3
+  ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = 
BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit 
$exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8)
+  ; CHECK-NEXT:   $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]]
+  ; CHECK-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
+  %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr 
addrspace(1) %p, i16 0, i32 0, i32 0)
+  %loaded = load float, ptr addrspace(7) %ptr
+  ret float %loaded
+}
+
+declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr nocapture 
readnone, i16, i32, i32)
+declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) 
nocapture readnone, i16, i32, i32)
+declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr nocapture 
readnone, i16, i32, i32)
+declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) 
nocapture readnone, i16, i32, i32)
 declare float @llvm.amdgcn.raw.ptr.buffer.load(ptr addrspace(8) nocapture 
readonly, i32, i32, i32 immarg)
 declare float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) nocapture 
readonly, i32, i32, i32, i32 immarg)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll 
b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll
index b4840bce53d2c..3aa5ea995559f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll
@@ -18,7 +18,7 @@ define amdgpu_ps ptr addrspace(8) @basic_raw_buffer(ptr inreg 
%p) {
   ; CHECK-NEXT:   $sgpr2 = COPY [[S_MOV_B32_1]]
   ; CHECK-NEXT:   $sgpr3 = COPY [[S_MOV_B32_2]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
0, i32 1234, i32 5678)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 0, i32 1234, i32 5678)
   ret ptr addrspace(8) %rsrc
 }
 
@@ -36,7 +36,7 @@ define amdgpu_ps float @read_raw_buffer(ptr addrspace(1) 
inreg %p) {
   ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = 
BUFFER_LOAD_DWORD_OFFSET killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 4, 0, 0, 
implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 
8)
   ; CHECK-NEXT:   $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG $vgpr0
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr 
addrspace(1) %p, i16 0, i32 0, i32 0)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr 
addrspace(1) %p, i16 0, i32 0, i32 0)
   %loaded = call float @llvm.amdgcn.raw.ptr.buffer.load(ptr addrspace(8) 
%rsrc, i32 4, i32 0, i32 0)
   ret float %loaded
 }
@@ -59,7 +59,7 @@ define amdgpu_ps ptr addrspace(8) @basic_struct_buffer(ptr 
inreg %p) {
   ; CHECK-NEXT:   $sgpr2 = COPY [[S_MOV_B32_2]]
   ; CHECK-NEXT:   $sgpr3 = COPY [[S_MOV_B32_3]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
4, i32 1234, i32 5678)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 4, i32 1234, i32 5678)
   ret ptr addrspace(8) %rsrc
 }
 
@@ -81,7 +81,7 @@ define amdgpu_ps ptr addrspace(8) @variable_top_half(ptr 
inreg %p, i32 inreg %nu
   ; CHECK-NEXT:   $sgpr2 = COPY [[COPY1]]
   ; CHECK-NEXT:   $sgpr3 = COPY [[COPY]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
4, i32 %numVals, i32 %flags)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 4, i32 %numVals, i32 %flags)
   ret ptr addrspace(8) %rsrc
 }
 
@@ -104,7 +104,7 @@ define amdgpu_ps ptr addrspace(8) @general_case(ptr inreg 
%p, i16 inreg %stride,
   ; CHECK-NEXT:   $sgpr2 = COPY [[COPY1]]
   ; CHECK-NEXT:   $sgpr3 = COPY [[COPY]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
%stride, i32 %numVals, i32 %flags)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 %stride, i32 %numVals, i32 %flags)
   ret ptr addrspace(8) %rsrc
 }
 
@@ -128,7 +128,7 @@ define amdgpu_ps float @general_case_load(ptr inreg %p, i16 
inreg %stride, i32 i
   ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_IDXEN:%[0-9]+]]:vgpr_32 = 
BUFFER_LOAD_DWORD_IDXEN [[COPY5]], killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 0, 
0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, 
addrspace 8)
   ; CHECK-NEXT:   $vgpr0 = COPY [[BUFFER_LOAD_DWORD_IDXEN]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG $vgpr0
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
%stride, i32 %numVals, i32 %flags)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 %stride, i32 %numVals, i32 %flags)
   %value = call float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) 
%rsrc, i32 0, i32 0, i32 0, i32 0)
   ret float %value
 }
@@ -153,12 +153,52 @@ define amdgpu_ps float 
@general_case_load_with_waterfall(ptr %p, i16 %stride, i3
   ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_IDXEN:%[0-9]+]]:vgpr_32 = 
BUFFER_LOAD_DWORD_IDXEN [[COPY5]], killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 0, 
0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, 
addrspace 8)
   ; CHECK-NEXT:   $vgpr0 = COPY [[BUFFER_LOAD_DWORD_IDXEN]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG $vgpr0
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
%stride, i32 %numVals, i32 %flags)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 %stride, i32 %numVals, i32 %flags)
   %value = call float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) 
%rsrc, i32 0, i32 0, i32 0, i32 0)
   ret float %value
 }
 
-declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr nocapture 
readnone, i16, i32, i32)
-declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) 
nocapture readnone, i16, i32, i32)
+define amdgpu_ps float @read_buffer_fat_ptr_p0(ptr inreg %p) {
+  ; CHECK-LABEL: name: read_buffer_fat_ptr_p0
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $sgpr0, $sgpr1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+  ; CHECK-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535
+  ; CHECK-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], killed 
[[S_MOV_B32_]], implicit-def dead $scc
+  ; CHECK-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], 
%subreg.sub0, killed [[S_AND_B32_]], %subreg.sub1, [[S_MOV_B32_1]], 
%subreg.sub2, [[S_MOV_B32_1]], %subreg.sub3
+  ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = 
BUFFER_LOAD_DWORD_OFFSET killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 0, 0, 0, 
implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 
8)
+  ; CHECK-NEXT:   $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]]
+  ; CHECK-NEXT:   SI_RETURN_TO_EPILOG $vgpr0
+  %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %p, i16 
0, i32 0, i32 0)
+  %loaded = load float, ptr addrspace(7) %ptr
+  ret float %loaded
+}
+
+define amdgpu_ps float @read_buffer_fat_ptr_p1(ptr addrspace(1) inreg %p) {
+  ; CHECK-LABEL: name: read_buffer_fat_ptr_p1
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $sgpr0, $sgpr1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+  ; CHECK-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535
+  ; CHECK-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], killed 
[[S_MOV_B32_]], implicit-def dead $scc
+  ; CHECK-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], 
%subreg.sub0, killed [[S_AND_B32_]], %subreg.sub1, [[S_MOV_B32_1]], 
%subreg.sub2, [[S_MOV_B32_1]], %subreg.sub3
+  ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = 
BUFFER_LOAD_DWORD_OFFSET killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 0, 0, 0, 
implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 
8)
+  ; CHECK-NEXT:   $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]]
+  ; CHECK-NEXT:   SI_RETURN_TO_EPILOG $vgpr0
+  %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr 
addrspace(1) %p, i16 0, i32 0, i32 0)
+  %loaded = load float, ptr addrspace(7) %ptr
+  ret float %loaded
+}
+
+declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr nocapture 
readnone, i16, i32, i32)
+declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) 
nocapture readnone, i16, i32, i32)
+declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr nocapture 
readnone, i16, i32, i32)
+declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) 
nocapture readnone, i16, i32, i32)
 declare float @llvm.amdgcn.raw.ptr.buffer.load(ptr addrspace(8) nocapture 
readonly, i32, i32, i32 immarg)
 declare float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) nocapture 
readonly, i32, i32, i32, i32 immarg)
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll 
b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll
index 99fcbc595ff7f..ea4117b418959 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll
@@ -349,6 +349,20 @@ define <2 x ptr addrspace(7)> @addrspacecast_vec(<2 x ptr 
addrspace(8)> %buf) {
   ret <2 x ptr addrspace(7)> %ret
 }
 
+declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1), 
i16, i32, i32)
+
+define ptr addrspace(7) @make_buffer_rsrc(ptr addrspace(1) %buf, i16 %stride, 
i32 %numRecords, i32 %flags) {
+; CHECK-LABEL: define { ptr addrspace(8), i32 } @make_buffer_rsrc
+; CHECK-SAME: (ptr addrspace(1) [[BUF:%.*]], i16 [[STRIDE:%.*]], i32 
[[NUMRECORDS:%.*]], i32 [[FLAGS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[RET:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[BUF]], i16 [[STRIDE]], 
i32 [[NUMRECORDS]], i32 [[FLAGS]])
+; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { ptr addrspace(8), i32 } poison, 
ptr addrspace(8) [[RET]], 0
+; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue { ptr addrspace(8), i32 } 
[[TMP1]], i32 0, 1
+; CHECK-NEXT:    ret { ptr addrspace(8), i32 } [[TMP2]]
+;
+  %ret = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr 
addrspace(1) %buf, i16 %stride, i32 %numRecords, i32 %flags)
+  ret ptr addrspace(7) %ret
+}
+
 define i1 @icmp_eq(ptr addrspace(7) %a, ptr addrspace(7) %b) {
 ; CHECK-LABEL: define i1 @icmp_eq
 ; CHECK-SAME: ({ ptr addrspace(8), i32 } [[A:%.*]], { ptr addrspace(8), i32 } 
[[B:%.*]]) #[[ATTR0]] {
diff --git a/llvm/test/CodeGen/AMDGPU/make-buffer-rsrc-lds-fails.ll 
b/llvm/test/CodeGen/AMDGPU/make-buffer-rsrc-lds-fails.ll
index 0679686f77eef..4f88077e3b0ee 100644
--- a/llvm/test/CodeGen/AMDGPU/make-buffer-rsrc-lds-fails.ll
+++ b/llvm/test/CodeGen/AMDGPU/make-buffer-rsrc-lds-fails.ll
@@ -3,7 +3,7 @@
 ; RUN: not --crash llc -global-isel -mtriple=amdgcn -mcpu=gfx900 < %s
 
 define amdgpu_ps ptr addrspace(8) @basic_raw_buffer(ptr addrspace(3) inreg %p) 
{
-  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p3(ptr 
addrspace(3) %p, i16 0, i32 1234, i32 5678)
+  %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p3(ptr 
addrspace(3) %p, i16 0, i32 1234, i32 5678)
   ret ptr addrspace(8) %rsrc
 }
-declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p3(ptr addrspace(3) 
nocapture readnone, i16, i32, i32)
+declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p3(ptr addrspace(3) 
nocapture readnone, i16, i32, i32)
diff --git a/llvm/test/CodeGen/AMDGPU/ptr-buffer-alias-scheduling.ll 
b/llvm/test/CodeGen/AMDGPU/ptr-buffer-alias-scheduling.ll
index e2f4d1c6e57bc..0ac3d652050d3 100644
--- a/llvm/test/CodeGen/AMDGPU/ptr-buffer-alias-scheduling.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptr-buffer-alias-scheduling.ll
@@ -85,8 +85,8 @@ define amdgpu_kernel void @buffers_from_flat_dont_alias(ptr 
noalias %a.flat, ptr
 ; GISEL-NEXT:    v_mul_f32_e32 v3, v3, v3
 ; GISEL-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; GISEL-NEXT:    s_endpgm
-  %a = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %a.flat, i16 
0, i32 16, i32 0)
-  %b = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %b.flat, i16 
0, i32 16, i32 0)
+  %a = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %a.flat, 
i16 0, i32 16, i32 0)
+  %b = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %b.flat, 
i16 0, i32 16, i32 0)
 
   %l0 = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) %a, 
i32 0, i32 0, i32 0)
   %s0 = fmul float %l0, %l0
@@ -211,4 +211,4 @@ declare i32 @llvm.amdgcn.workitem.id.x()
 
 declare float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8), i32, i32, 
i32)
 declare void @llvm.amdgcn.raw.ptr.buffer.store.f32(float, ptr addrspace(8), 
i32, i32, i32 immarg)
-declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr readnone 
nocapture, i16, i32, i32)
+declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr readnone 
nocapture, i16, i32, i32)
diff --git a/llvm/test/Transforms/FunctionAttrs/make-buffer-rsrc.ll 
b/llvm/test/Transforms/FunctionAttrs/make-buffer-rsrc.ll
index 59ec2d47bc72c..9ef153183cc9e 100644
--- a/llvm/test/Transforms/FunctionAttrs/make-buffer-rsrc.ll
+++ b/llvm/test/Transforms/FunctionAttrs/make-buffer-rsrc.ll
@@ -9,8 +9,8 @@ define amdgpu_kernel void @test_make_buffer_rsrc(ptr %p, ptr 
%q) {
 ; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind 
willreturn memory(argmem: readwrite)
 ; FNATTRS-LABEL: define {{[^@]+}}@test_make_buffer_rsrc
 ; FNATTRS-SAME: (ptr readonly captures(none) [[P:%.*]], ptr writeonly 
captures(none) [[Q:%.*]]) #[[ATTR0:[0-9]+]] {
-; FNATTRS-NEXT:    [[P_RSRC:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr [[P]], i16 0, i32 4, i32 822243328)
-; FNATTRS-NEXT:    [[Q_RSRC:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr [[Q]], i16 0, i32 4, i32 822243328)
+; FNATTRS-NEXT:    [[P_RSRC:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P]], i16 0, i32 4, i32 822243328)
+; FNATTRS-NEXT:    [[Q_RSRC:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[Q]], i16 0, i32 4, i32 822243328)
 ; FNATTRS-NEXT:    [[V:%.*]] = call i8 @llvm.amdgcn.raw.ptr.buffer.load.i8(ptr 
addrspace(8) [[P_RSRC]], i32 0, i32 0, i32 0)
 ; FNATTRS-NEXT:    call void @llvm.amdgcn.raw.ptr.buffer.store.i8(i8 [[V]], 
ptr addrspace(8) [[Q_RSRC]], i32 0, i32 0, i32 0)
 ; FNATTRS-NEXT:    ret void
@@ -18,21 +18,21 @@ define amdgpu_kernel void @test_make_buffer_rsrc(ptr %p, 
ptr %q) {
 ; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind 
willreturn memory(argmem: readwrite)
 ; ATTRIBUTOR-LABEL: define {{[^@]+}}@test_make_buffer_rsrc
 ; ATTRIBUTOR-SAME: (ptr nofree readonly captures(none) [[P:%.*]], ptr nofree 
writeonly captures(none) [[Q:%.*]]) #[[ATTR0:[0-9]+]] {
-; ATTRIBUTOR-NEXT:    [[P_RSRC:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr [[P]], i16 0, i32 4, i32 822243328) 
#[[ATTR4:[0-9]+]]
-; ATTRIBUTOR-NEXT:    [[Q_RSRC:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr [[Q]], i16 0, i32 4, i32 822243328) 
#[[ATTR4]]
+; ATTRIBUTOR-NEXT:    [[P_RSRC:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P]], i16 0, i32 4, i32 822243328) 
#[[ATTR4:[0-9]+]]
+; ATTRIBUTOR-NEXT:    [[Q_RSRC:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[Q]], i16 0, i32 4, i32 822243328) 
#[[ATTR4]]
 ; ATTRIBUTOR-NEXT:    [[V:%.*]] = call i8 
@llvm.amdgcn.raw.ptr.buffer.load.i8(ptr addrspace(8) readonly captures(none) 
[[P_RSRC]], i32 0, i32 0, i32 0) #[[ATTR5:[0-9]+]]
 ; ATTRIBUTOR-NEXT:    call void @llvm.amdgcn.raw.ptr.buffer.store.i8(i8 [[V]], 
ptr addrspace(8) writeonly captures(none) [[Q_RSRC]], i32 0, i32 0, i32 0) 
#[[ATTR6:[0-9]+]]
 ; ATTRIBUTOR-NEXT:    ret void
 ;
-  %p.rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %p, i16 
0, i32 4, i32 822243328)
-  %q.rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr %q, i16 
0, i32 4, i32 822243328)
+  %p.rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, 
i16 0, i32 4, i32 822243328)
+  %q.rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %q, 
i16 0, i32 4, i32 822243328)
   %v = call i8 @llvm.amdgcn.raw.ptr.buffer.load.i8(ptr addrspace(8) %p.rsrc, 
i32 0, i32 0, i32 0)
   call void @llvm.amdgcn.raw.ptr.buffer.store.i8(i8 %v, ptr addrspace(8) 
%q.rsrc, i32 0, i32 0, i32 0)
   ret void
 }
 
 ; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable 
willreturn memory(none)
-declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(ptr readnone, i16, 
i32, i32) #0
+declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr readnone, 
i16, i32, i32) #0
 
 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn 
memory(argmem: read)
 declare i8 @llvm.amdgcn.raw.ptr.buffer.load.i8(ptr addrspace(8) nocapture 
readonly, i32, i32, i32 immarg) #1
diff --git a/llvm/test/Transforms/LICM/AMDGPU/buffer-rsrc-ptrs.ll 
b/llvm/test/Transforms/LICM/AMDGPU/buffer-rsrc-ptrs.ll
index 2e539d03afc1c..e69da434c0caf 100644
--- a/llvm/test/Transforms/LICM/AMDGPU/buffer-rsrc-ptrs.ll
+++ b/llvm/test/Transforms/LICM/AMDGPU/buffer-rsrc-ptrs.ll
@@ -165,8 +165,8 @@ define void @hoistable_buffer_construction_intrinsic(ptr 
addrspace(1) noalias %p
 ; CHECK-LABEL: define void @hoistable_buffer_construction_intrinsic
 ; CHECK-SAME: (ptr addrspace(1) noalias [[P_GLOBAL:%.*]], ptr addrspace(1) 
noalias [[Q_GLOBAL:%.*]], i32 [[BOUND:%.*]]) {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) [[P_GLOBAL]], i16 0, i32 0, 
i32 0)
-; CHECK-NEXT:    [[Q:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) [[Q_GLOBAL]], i16 0, i32 0, 
i32 0)
+; CHECK-NEXT:    [[P:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P_GLOBAL]], i16 0, i32 
0, i32 0)
+; CHECK-NEXT:    [[Q:%.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[Q_GLOBAL]], i16 0, i32 
0, i32 0)
 ; CHECK-NEXT:    [[HOISTABLE:%.*]] = call i32 
@llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8) [[Q]], i32 0, i32 0, 
i32 0, i32 0)
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
@@ -181,8 +181,8 @@ define void @hoistable_buffer_construction_intrinsic(ptr 
addrspace(1) noalias %p
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %p = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) 
%p.global, i16 0, i32 0, i32 0)
-  %q = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) 
%q.global, i16 0, i32 0, i32 0)
+  %p = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr 
addrspace(1) %p.global, i16 0, i32 0, i32 0)
+  %q = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr 
addrspace(1) %q.global, i16 0, i32 0, i32 0)
   br label %loop
 loop:
   %i = phi i32 [0, %entry], [%next, %loop]
@@ -256,8 +256,8 @@ declare i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr 
addrspace(8) nocapture read
 declare i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8) nocapture 
readonly, i32, i32, i32, i32 immarg) #0
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: 
write)
 declare void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32, ptr addrspace(8) 
nocapture writeonly, i32, i32, i32 immarg) #1
-; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn 
memory(none)declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr 
addrspace(1) nocapture readnone, i16, i32, i32) #2
-declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) 
readnone nocapture, i16, i32, i32)
+; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn 
memory(none)declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr 
addrspace(1) nocapture readnone, i16, i32, i32) #2
+declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) 
readnone nocapture, i16, i32, i32)
 attributes #0 = { nocallback nofree nosync nounwind willreturn memory(argmem: 
read) }
 attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: 
write) }
 attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn 
memory(none) }
diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td 
b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
index 7efa4ffa2aa6f..488320e36e837 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
@@ -451,12 +451,12 @@ def ROCDL_GlobalLoadLDSOp :
 def ROCDLBufferRsrc : LLVM_PointerInAddressSpace<8>;
 
 def ROCDL_MakeBufferRsrcOp :
-  ROCDL_IntrOp<"make.buffer.rsrc", [], [0], [Pure], 1>,
+  ROCDL_IntrOp<"make.buffer.rsrc", [0], [0], [Pure], 1>,
   Arguments<(ins LLVM_AnyPointer:$base,
                  I16:$stride,
                  I32:$numRecords,
                  I32:$flags)> {
-  let results = (outs ROCDLBufferRsrc:$res);
+  let results = (outs LLVM_AnyPointer:$res);
   let assemblyFormat = "operands attr-dict `:` type($base) `to` type($res)";
 }
 
@@ -692,7 +692,7 @@ def ROCDL_CvtScaleF32PkFp8F32:
     attr-dict $srcA `,` $srcB `,` $scale `->` $old `[` $wordSel `]` `:` 
type($res)
   }];
 }
-    
+
 def ROCDL_CvtScaleF32PkBf8F32:
     ROCDL_IntrOp<"cvt.scalef32.pk.bf8.f32", [], [], [Pure], 1>,
     Arguments<(ins ROCDL_V2I16Type:$old, F32:$srcA, F32:$srcB, F32: $scale, 
I1:$wordSel)> {
@@ -753,7 +753,7 @@ def ROCDL_CvtScaleF32Bf8Op :
     Arguments<(ins I32:$src, F32: $scale, I32:$byteSel)> {
   let summary = "Scale and convert bf8 to f32";
   let description = [{
-    Scale `src` by the exponent in `scale` then convert 8-bit bf8 value 
+    Scale `src` by the exponent in `scale` then convert 8-bit bf8 value
     from the `byteSel`th bit of `src` to fp32.
   }];
   let assemblyFormat = [{
diff --git a/mlir/test/Target/LLVMIR/rocdl.mlir 
b/mlir/test/Target/LLVMIR/rocdl.mlir
index eac28c57e2ab4..84a30277e63da 100644
--- a/mlir/test/Target/LLVMIR/rocdl.mlir
+++ b/mlir/test/Target/LLVMIR/rocdl.mlir
@@ -851,12 +851,23 @@ llvm.func @rocdl.make.buffer.rsrc(%ptr : !llvm.ptr,
                                   %numRecords : i32,
                                   %flags : i32) -> !llvm.ptr<8> {
   // CHECK-LABEL: rocdl.make.buffer.rsrc
-  // CHECK: %[[rsrc:.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p0(ptr %{{.*}}, i16 %{{.*}}, i32 %{{.*}}, i32 
%{{.*}})
+  // CHECK: %[[rsrc:.*]] = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %{{.*}}, i16 %{{.*}}, i32 %{{.*}}, i32 
%{{.*}})
   // CHECK: ret ptr addrspace(8) %[[rsrc]]
   %rsrc = rocdl.make.buffer.rsrc %ptr, %stride, %numRecords, %flags : 
!llvm.ptr to !llvm.ptr<8>
   llvm.return %rsrc : !llvm.ptr<8>
 }
 
+llvm.func @rocdl.make.buffer.rsrc.p7.p1(%ptr : !llvm.ptr<1>,
+                                  %stride : i16,
+                                  %numRecords : i32,
+                                  %flags : i32) -> !llvm.ptr<7> {
+  // CHECK-LABEL: rocdl.make.buffer.rsrc.p7.p1
+  // CHECK: %[[rsrc:.*]] = call ptr addrspace(7) 
@llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %{{.*}}, i16 %{{.*}}, i32 
%{{.*}}, i32 %{{.*}})
+  // CHECK: ret ptr addrspace(7) %[[rsrc]]
+  %rsrc = rocdl.make.buffer.rsrc %ptr, %stride, %numRecords, %flags : <1> to 
<7>
+  llvm.return %rsrc : !llvm.ptr<7>
+}
+
 llvm.func @rocdl.wmma.fp8(%arg0 : vector<2 x i32>, %arg1 : vector<8xf32>) -> 
vector<8xf32> {
   // CHECK: call <8 x float> 
@llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.v8f32.v2i32(<2 x i32> %{{.*}}, <2 x i32> 
%{{.*}}, <8 x float> %{{.*}})
   %r0 = rocdl.wmma.f32.16x16x16.fp8_fp8 %arg0, %arg0, %arg1: (vector<2xi32>, 
vector<2xi32>, vector<8xf32>) -> vector<8xf32>

>From ed8ff36f34c99865a887a32daf0b56299ce7be86 Mon Sep 17 00:00:00 2001
From: Krzysztof Drewniak <krzysztof.drewn...@amd.com>
Date: Tue, 18 Feb 2025 17:16:37 +0000
Subject: [PATCH 2/2] Review comments

---
 llvm/lib/IR/AutoUpgrade.cpp                  | 1 -
 mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td | 4 ++--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 48c4c81d324a9..57072715366c9 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -30,7 +30,6 @@
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/IntrinsicsAArch64.h"
-#include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/IR/IntrinsicsARM.h"
 #include "llvm/IR/IntrinsicsNVPTX.h"
 #include "llvm/IR/IntrinsicsRISCV.h"
diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td 
b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
index 488320e36e837..01059e42974d0 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
@@ -692,7 +692,7 @@ def ROCDL_CvtScaleF32PkFp8F32:
     attr-dict $srcA `,` $srcB `,` $scale `->` $old `[` $wordSel `]` `:` 
type($res)
   }];
 }
-
+    
 def ROCDL_CvtScaleF32PkBf8F32:
     ROCDL_IntrOp<"cvt.scalef32.pk.bf8.f32", [], [], [Pure], 1>,
     Arguments<(ins ROCDL_V2I16Type:$old, F32:$srcA, F32:$srcB, F32: $scale, 
I1:$wordSel)> {
@@ -753,7 +753,7 @@ def ROCDL_CvtScaleF32Bf8Op :
     Arguments<(ins I32:$src, F32: $scale, I32:$byteSel)> {
   let summary = "Scale and convert bf8 to f32";
   let description = [{
-    Scale `src` by the exponent in `scale` then convert 8-bit bf8 value
+    Scale `src` by the exponent in `scale` then convert 8-bit bf8 value 
     from the `byteSel`th bit of `src` to fp32.
   }];
   let assemblyFormat = [{
