krzysz00 updated this revision to Diff 496515.
krzysz00 added a comment.
Herald added a project: clang.
Herald added a subscriber: cfe-commits.

Apparently clang has its own copy of this data layout.
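For context, a minimal sketch of the shape of that change in clang's copy of the AMDGCN target description (not the exact hunk from this revision; the constant name and surrounding layout string are reproduced from the test expectations below and may differ in detail):

  // clang/lib/Basic/Targets/AMDGPU.cpp (sketch only): the new p7 spec matches
  // the strings now checked in clang/test/CodeGen/target-data.c and the .ll
  // tests updated in this diff.
  static const char *const DataLayoutStringAMDGCN =
      "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
      "-p7:128:128:128:32" // 128-bit buffer fat pointers with a 32-bit index type
      "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
      "-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7";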


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D143522/new/

https://reviews.llvm.org/D143522

Files:
  clang/lib/Basic/Targets/AMDGPU.cpp
  clang/test/CodeGen/target-data.c
  clang/test/CodeGenOpenCL/amdgpu-env-amdgcn.cl
  llvm/lib/IR/AutoUpgrade.cpp
  llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
  llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll
  llvm/test/CodeGen/AMDGPU/addrspacecast-captured.ll
  llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
  llvm/test/CodeGen/AMDGPU/cgp-addressing-modes.ll
  llvm/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address.ll
  llvm/test/CodeGen/AMDGPU/loop-idiom.ll
  llvm/test/CodeGen/AMDGPU/mdt-preserving-crash.ll
  llvm/test/CodeGen/AMDGPU/noop-shader-O0.ll
  llvm/test/CodeGen/AMDGPU/nullptr-addrspace-7-tmp.ll
  llvm/test/CodeGen/AMDGPU/nullptr.ll
  llvm/test/CodeGen/AMDGPU/promote-alloca-lifetime.ll
  llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll
  llvm/test/CodeGen/AMDGPU/sgpr-copy-local-cse.ll
  llvm/test/CodeGen/AMDGPU/unroll.ll
  llvm/test/Instrumentation/AddressSanitizer/AMDGPU/adaptive_constant_global_redzones.ll
  llvm/test/Instrumentation/AddressSanitizer/AMDGPU/adaptive_global_redzones.ll
  llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_do_not_instrument_lds.ll
  llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_do_not_instrument_scratch.ll
  llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_constant_address_space.ll
  llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_generic_address_space.ll
  llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_global_address_space.ll
  llvm/test/Instrumentation/AddressSanitizer/AMDGPU/global_metadata_addrspacecasts.ll
  llvm/test/Instrumentation/AddressSanitizer/AMDGPU/no_redzones_in_lds_globals.ll
  llvm/test/Instrumentation/AddressSanitizer/AMDGPU/no_redzones_in_scratch_globals.ll
  llvm/test/Transforms/AlignmentFromAssumptions/amdgpu-crash.ll
  llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
  llvm/test/Transforms/EarlyCSE/AMDGPU/memrealtime.ll
  llvm/test/Transforms/IndVarSimplify/AMDGPU/no-widen-to-i64.ll
  llvm/test/Transforms/InferAddressSpaces/AMDGPU/noop-ptrint-pair.ll
  llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll
  llvm/test/Transforms/InferAddressSpaces/X86/noop-ptrint-pair.ll
  llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll
  llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
  llvm/test/Transforms/InstCombine/alloca-in-non-alloca-as.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/aa-metadata.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/missing-alignment.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll
  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll
  llvm/test/Transforms/LoopLoadElim/pr46854-adress-spaces.ll
  llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
  llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
  llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-invalid-ptr-extend.ll
  llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
  llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
  llvm/test/Transforms/OpenMP/attributor_pointer_offset_crash.ll
  llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
  llvm/test/Transforms/OpenMP/values_in_offload_arrays.alloca.ll
  llvm/test/Transforms/SLPVectorizer/AMDGPU/address-space-ptr-sze-gep-index-assert.ll
  llvm/test/Transforms/VectorCombine/AMDGPU/as-transition-inseltpoison.ll
  llvm/test/Transforms/VectorCombine/AMDGPU/as-transition.ll
  llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp

Index: llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp
===================================================================
--- llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp
+++ llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp
@@ -30,7 +30,13 @@
 
   // Check that AMDGPU targets add -G1 if it's not present.
   EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32", "r600"), "e-p:32:32-G1");
-  EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64", "amdgcn"), "e-p:64:64-G1");
+  // and that AMDGCN adds p7 as well.
+  EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64", "amdgcn"),
+            "e-p:64:64-G1-p7:128:128:128:32");
+  EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64-G1", "amdgcn"),
+            "e-p:64:64-G1-p7:128:128:128:32");
+  // but that r600 does not.
+  EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32-G1", "r600"), "e-p:32:32-G1");
 
   // Check that RISCV64 upgrades -n64 to -n32:64.
   EXPECT_EQ(UpgradeDataLayoutString("e-m:e-p:64:64-i64:64-i128:128-n64-S128",
@@ -59,9 +65,21 @@
   // Check that AMDGPU targets don't add -G1 if there is already a -G flag.
   EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32-G2", "r600"), "e-p:32:32-G2");
   EXPECT_EQ(UpgradeDataLayoutString("G2", "r600"), "G2");
-  EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64-G2", "amdgcn"), "e-p:64:64-G2");
-  EXPECT_EQ(UpgradeDataLayoutString("G2-e-p:64:64", "amdgcn"), "G2-e-p:64:64");
-  EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64-G0", "amdgcn"), "e-p:64:64-G0");
+  EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64-G2", "amdgcn"),
+            "e-p:64:64-G2-p7:128:128:128:32");
+  EXPECT_EQ(UpgradeDataLayoutString("G2-e-p:64:64", "amdgcn"),
+            "G2-e-p:64:64-p7:128:128:128:32");
+  EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64-G0", "amdgcn"),
+            "e-p:64:64-G0-p7:128:128:128:32");
+
+  // Check that AMDGCN targets don't add -p7 if there is already a p7
+  // definition.
+  EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64-p7:64:64", "amdgcn"),
+            "e-p:64:64-p7:64:64-G1");
+  EXPECT_EQ(UpgradeDataLayoutString("p7:64:64-G2-e-p:64:64", "amdgcn"),
+            "p7:64:64-G2-e-p:64:64");
+  EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64-p7:64:64-G1", "amdgcn"),
+            "e-p:64:64-p7:64:64-G1");
 }
 
 TEST(DataLayoutUpgradeTest, EmptyDataLayout) {
@@ -73,7 +91,7 @@
 
   // Check that AMDGPU targets add G1 if it's not present.
   EXPECT_EQ(UpgradeDataLayoutString("", "r600"), "G1");
-  EXPECT_EQ(UpgradeDataLayoutString("", "amdgcn"), "G1");
+  EXPECT_EQ(UpgradeDataLayoutString("", "amdgcn"), "G1-p7:128:128:128:32");
 }
 
 } // end namespace
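
(For readers of the unit tests above: the behavior they exercise is roughly the check sketched below, written here under assumed names and structure; the real implementation lives in llvm/lib/IR/AutoUpgrade.cpp and is not reproduced in this hunk.)

  #include "llvm/ADT/StringRef.h"
  #include <string>

  // Sketch: append the buffer-fat-pointer spec for amdgcn (but not r600)
  // data layouts that do not already declare an address space 7 entry.
  // The real code likely inspects layout components rather than doing a
  // plain substring match.
  static std::string addAMDGCNP7Spec(llvm::StringRef DL) {
    std::string Res = DL.str();
    if (!DL.contains("p7")) {
      if (!Res.empty())
        Res += "-";
      Res += "p7:128:128:128:32";
    }
    return Res;
  }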
Index: llvm/test/Transforms/VectorCombine/AMDGPU/as-transition.ll
===================================================================
--- llvm/test/Transforms/VectorCombine/AMDGPU/as-transition.ll
+++ llvm/test/Transforms/VectorCombine/AMDGPU/as-transition.ll
@@ -2,7 +2,7 @@
 ; RUN: opt < %s -passes=vector-combine -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s --check-prefixes=CHECK
 
 ; ModuleID = 'load-as-transition.ll'
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 %struct.hoge = type { float }
Index: llvm/test/Transforms/VectorCombine/AMDGPU/as-transition-inseltpoison.ll
===================================================================
--- llvm/test/Transforms/VectorCombine/AMDGPU/as-transition-inseltpoison.ll
+++ llvm/test/Transforms/VectorCombine/AMDGPU/as-transition-inseltpoison.ll
@@ -2,7 +2,7 @@
 ; RUN: opt < %s -passes=vector-combine -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s --check-prefixes=CHECK
 
 ; ModuleID = 'load-as-transition.ll'
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 %struct.hoge = type { float }
Index: llvm/test/Transforms/SLPVectorizer/AMDGPU/address-space-ptr-sze-gep-index-assert.ll
===================================================================
--- llvm/test/Transforms/SLPVectorizer/AMDGPU/address-space-ptr-sze-gep-index-assert.ll
+++ llvm/test/Transforms/SLPVectorizer/AMDGPU/address-space-ptr-sze-gep-index-assert.ll
@@ -4,7 +4,7 @@
 ; Make sure there's no SCEV assert when the indexes are for different
 ; sized address spaces
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 define void @slp_scev_assert(i32 %idx, i64 %tmp3) #0 {
 ; CHECK-LABEL: @slp_scev_assert(
Index: llvm/test/Transforms/OpenMP/values_in_offload_arrays.alloca.ll
===================================================================
--- llvm/test/Transforms/OpenMP/values_in_offload_arrays.alloca.ll
+++ llvm/test/Transforms/OpenMP/values_in_offload_arrays.alloca.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -passes=openmp-opt-cgscc -aa-pipeline=basic-aa -openmp-hide-memory-transfer-latency < %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 
 @.__omp_offloading_heavyComputation.region_id = weak constant i8 0
 @.offload_maptypes. = private unnamed_addr constant [2 x i64] [i64 35, i64 35]
Index: llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
===================================================================
--- llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
+++ llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
@@ -8,7 +8,7 @@
 ; CHECK: store i32 1, ptr addrspace(3) @IsSPMDMode
 ; CHECK-NOT: store i32 0, ptr addrspace(3) @IsSPMDMode
 ;
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 %struct.ident_t = type { i32, i32, i32, i32, ptr }
Index: llvm/test/Transforms/OpenMP/attributor_pointer_offset_crash.ll
===================================================================
--- llvm/test/Transforms/OpenMP/attributor_pointer_offset_crash.ll
+++ llvm/test/Transforms/OpenMP/attributor_pointer_offset_crash.ll
@@ -2,7 +2,7 @@
 
 ; Verify the address space cast doesn't cause a crash
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 
 %"struct.(anonymous namespace)::TeamStateTy" = type { %"struct.(anonymous namespace)::ICVStateTy", i32, ptr }
 %"struct.(anonymous namespace)::ICVStateTy" = type { i32, i32, i32, i32, i32, i32 }
Index: llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
===================================================================
--- llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
+++ llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
@@ -3,7 +3,7 @@
 ; Test for assert resulting from inconsistent isLegalAddressingMode
 ; answers when the address space was dropped from the query.
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 %0 = type { i32, double, i32, float }
 
Index: llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
===================================================================
--- llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
+++ llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
@@ -4,7 +4,7 @@
 ; Test various conditions where OptimizeLoopTermCond doesn't look at a
 ; memory instruction use and fails to find the address space.
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 define amdgpu_kernel void @local_cmp_user(i32 %arg0) nounwind {
 ; CHECK-LABEL: @local_cmp_user(
Index: llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-invalid-ptr-extend.ll
===================================================================
--- llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-invalid-ptr-extend.ll
+++ llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-invalid-ptr-extend.ll
@@ -5,7 +5,7 @@
 ; Test that LSR does not attempt to extend a pointer type to an integer type,
 ; which causes a SCEV analysis assertion.
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 
 target triple = "amdgcn-amd-amdhsa"
 
Index: llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
===================================================================
--- llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
+++ llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
@@ -3,7 +3,7 @@
 ; Test that loops with different maximum offsets for different address
 ; spaces are correctly handled.
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; OPT-LABEL: @test_global_addressing_loop_uniform_index_max_offset_i32(
 ; OPT: .lr.ph.preheader:
Index: llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
===================================================================
--- llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
+++ llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -S -mtriple=amdgcn-- -mcpu=bonaire -loop-reduce < %s | FileCheck -check-prefix=OPT %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; Make sure the pointer / address space of AtomicRMW is considered
 
Index: llvm/test/Transforms/LoopLoadElim/pr46854-adress-spaces.ll
===================================================================
--- llvm/test/Transforms/LoopLoadElim/pr46854-adress-spaces.ll
+++ llvm/test/Transforms/LoopLoadElim/pr46854-adress-spaces.ll
@@ -3,7 +3,7 @@
 
 ; RUN: opt -passes='require<globals-aa>,loop-simplify,loop-load-elim' -S %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 %struct.foo = type { %struct.pluto, i8, ptr, i32 }
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/weird-type-accesses.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; Checks that we don't merge loads/stores of types smaller than one
 ; byte, or vectors with elements smaller than one byte.
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/store_with_aliasing_load.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; Check that, in the presence of an aliasing load, the stores preceding the
 ; aliasing load are safe to vectorize.
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer,dce -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 define void @base_case(i1 %cnd, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %out) {
 ; CHECK-LABEL: @base_case
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/selects-inseltpoison.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer,dce -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 define void @base_case(i1 %cnd, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %out) {
 ; CHECK-LABEL: @base_case
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/pointer-elements.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 declare i32 @llvm.amdgcn.workitem.id.x() #1
 
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/optnone.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; CHECK-LABEL: @optnone(
 ; CHECK: store i32
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; CHECK-LABEL: @no_implicit_float(
 ; CHECK: store i32
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=GCN,GFX7 %s
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=GCN,GFX9 %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; Checks that there is no crash when there are multiple tails
 ; for a the same head starting a chain.
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/missing-alignment.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/missing-alignment.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/missing-alignment.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -mtriple=amdgcn-- -mcpu=tonga -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 @lds = internal addrspace(3) global [512 x float] undef, align 4
 
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; CHECK-LABEL: @merge_v2i32_v2i32(
 ; CHECK: load <4 x i32>
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
@@ -2,7 +2,7 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa --mcpu=hawaii -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 ; Copy of test/CodeGen/AMDGPU/merge-stores.ll with some additions
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; TODO: Vector element tests
 ; TODO: Non-zero base offset for load and store combinations
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
@@ -5,7 +5,7 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-8,+unaligned-scratch-access -passes=load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT8,ELT8-UNALIGNED,UNALIGNED,ALL %s
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-16,+unaligned-scratch-access -passes=load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT16,ELT16-UNALIGNED,UNALIGNED,ALL %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32
 ; ELT4-ALIGNED: store i32
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; CHECK-LABEL: @interleave
 ; CHECK: load <2 x double>, ptr addrspace(1) %{{.}}, align 8{{$}}
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; This is NOT OK to vectorize, as either load may alias either store.
 
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; Check position of the inserted vector load/store.  Vectorized loads should be
 ; inserted at the position of the first load in the chain, and stores should be
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -S -mtriple=amdgcn--amdhsa -passes=load-store-vectorizer < %s | FileCheck %s
 ; RUN: opt -S -mtriple=amdgcn--amdhsa -passes='function(load-store-vectorizer)' < %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; Check that vectorizer can find a GEP through bitcast
 ; CHECK-LABEL: @vect_zext_bitcast_f32_to_i32_idx
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 declare i32 @llvm.amdgcn.workitem.id.x() #1
 
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 declare i64 @_Z12get_local_idj(i32)
 
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
@@ -5,7 +5,7 @@
 ; RUN: opt -S -passes='function(load-store-vectorizer)' --mcpu=hawaii -mattr=+unaligned-access-mode,+unaligned-scratch-access,+max-private-element-size-16 < %s | FileCheck -check-prefixes=CHECK,UNALIGNED %s
 
 target triple = "amdgcn--"
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 define amdgpu_kernel void @load_unknown_offset_align1_i8(ptr addrspace(1) noalias %out, i32 %offset) #0 {
 ; ALIGNED-LABEL: @load_unknown_offset_align1_i8(
Index: llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/aa-metadata.ll
===================================================================
--- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/aa-metadata.ll
+++ llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/aa-metadata.ll
@@ -2,7 +2,7 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa,scoped-noalias-aa -passes=load-store-vectorizer -S -o - %s | FileCheck -check-prefix=SCOPE %s
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa -passes=load-store-vectorizer -S -o - %s | FileCheck -check-prefix=NOSCOPE %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; This fails to vectorize if the !alias.scope is not used
 
Index: llvm/test/Transforms/InstCombine/alloca-in-non-alloca-as.ll
===================================================================
--- llvm/test/Transforms/InstCombine/alloca-in-non-alloca-as.ll
+++ llvm/test/Transforms/InstCombine/alloca-in-non-alloca-as.ll
@@ -3,7 +3,7 @@
 
 ; Gracefully handle the alloca that is not in the alloca AS (=5)
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 declare void @use(ptr, ptr)
Index: llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
===================================================================
--- llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
+++ llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
@@ -4,7 +4,7 @@
 ; Make sure the optimization from memcpy-from-global.ll happens, but
 ; the constant source is not a global variable.
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; Simple memcpy to alloca from constant address space argument.
 define i8 @memcpy_constant_arg_ptr_to_alloca(ptr addrspace(4) noalias readonly align 4 dereferenceable(32) %arg, i32 %idx) {
Index: llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll
===================================================================
--- llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll
+++ llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -mtriple=amdgcn--amdhsa -S -passes=inline -inline-threshold=0 < %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 define void @use_flat_ptr_arg(ptr nocapture %p) {
 entry:
Index: llvm/test/Transforms/InferAddressSpaces/X86/noop-ptrint-pair.ll
===================================================================
--- llvm/test/Transforms/InferAddressSpaces/X86/noop-ptrint-pair.ll
+++ llvm/test/Transforms/InferAddressSpaces/X86/noop-ptrint-pair.ll
@@ -2,7 +2,7 @@
 
 ; Check that assert in X86TargetMachine::isNoopAddrSpaceCast is not triggered.
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
 
 ; CHECK-LABEL: @noop_ptrint_pair(
 ; CHECK: addrspacecast ptr addrspace(1) %x to ptr addrspace(4)
Index: llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll
===================================================================
--- llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll
+++ llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=infer-address-spaces,instsimplify %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 define i8 @ptrmask_cast_local_to_flat(ptr addrspace(3) %src.ptr, i64 %mask) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat(
Index: llvm/test/Transforms/InferAddressSpaces/AMDGPU/noop-ptrint-pair.ll
===================================================================
--- llvm/test/Transforms/InferAddressSpaces/AMDGPU/noop-ptrint-pair.ll
+++ llvm/test/Transforms/InferAddressSpaces/AMDGPU/noop-ptrint-pair.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -o - -passes=infer-address-spaces %s | FileCheck -check-prefixes=COMMON,AMDGCN %s
 ; RUN: opt -S -o - -passes=infer-address-spaces -assume-default-is-flat-addrspace %s | FileCheck -check-prefixes=COMMON,NOTTI %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
 
 ; COMMON-LABEL: @noop_ptrint_pair(
 ; AMDGCN-NEXT: store i32 0, ptr addrspace(1) %{{.*}}
Index: llvm/test/Transforms/IndVarSimplify/AMDGPU/no-widen-to-i64.ll
===================================================================
--- llvm/test/Transforms/IndVarSimplify/AMDGPU/no-widen-to-i64.ll
+++ llvm/test/Transforms/IndVarSimplify/AMDGPU/no-widen-to-i64.ll
@@ -9,7 +9,7 @@
 ; twice as expensive as that on a 32-bit integer, or split into 2
 ; 32-bit components.
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; CHECK-LABEL: @indvar_32_bit(
 ; CHECK-NOT: sext i32
Index: llvm/test/Transforms/EarlyCSE/AMDGPU/memrealtime.ll
===================================================================
--- llvm/test/Transforms/EarlyCSE/AMDGPU/memrealtime.ll
+++ llvm/test/Transforms/EarlyCSE/AMDGPU/memrealtime.ll
@@ -1,5 +1,5 @@
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes='early-cse<memssa>' -earlycse-debug-hash < %s | FileCheck %s
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; CHECK-LABEL: @memrealtime(
 ; CHECK: call i64 @llvm.amdgcn.s.memrealtime()
Index: llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
===================================================================
--- llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
+++ llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
@@ -2,7 +2,7 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -atomic-expand %s | FileCheck %s
 ; RUN: opt -mtriple=r600-mesa-mesa3d -S -atomic-expand %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 define i16 @test_atomicrmw_xchg_i16_global(ptr addrspace(1) %ptr, i16 %value) {
 ; CHECK-LABEL: @test_atomicrmw_xchg_i16_global(
Index: llvm/test/Transforms/AlignmentFromAssumptions/amdgpu-crash.ll
===================================================================
--- llvm/test/Transforms/AlignmentFromAssumptions/amdgpu-crash.ll
+++ llvm/test/Transforms/AlignmentFromAssumptions/amdgpu-crash.ll
@@ -1,7 +1,7 @@
 ; Test that we don't crash.
 ; RUN: opt < %s -passes=alignment-from-assumptions -S
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
 
 %"core::str::CharIndices.29.66.90.114.138.149.165.173.181.197.205.213.229.387.398" = type { [0 x i64], i64, [0 x i64], { ptr, ptr }, [0 x i64] }
 %"unwind::libunwind::_Unwind_Exception.9.51.75.99.123.147.163.171.179.195.203.211.227.385.396" = type { [0 x i64], i64, [0 x i64], ptr, [0 x i64], [6 x i64], [0 x i64] }
Index: llvm/test/Instrumentation/AddressSanitizer/AMDGPU/no_redzones_in_scratch_globals.ll
===================================================================
--- llvm/test/Instrumentation/AddressSanitizer/AMDGPU/no_redzones_in_scratch_globals.ll
+++ llvm/test/Instrumentation/AddressSanitizer/AMDGPU/no_redzones_in_scratch_globals.ll
@@ -1,5 +1,5 @@
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 @G10 = addrspace(5) global [10 x i8] zeroinitializer, align 1
Index: llvm/test/Instrumentation/AddressSanitizer/AMDGPU/no_redzones_in_lds_globals.ll
===================================================================
--- llvm/test/Instrumentation/AddressSanitizer/AMDGPU/no_redzones_in_lds_globals.ll
+++ llvm/test/Instrumentation/AddressSanitizer/AMDGPU/no_redzones_in_lds_globals.ll
@@ -1,5 +1,5 @@
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 @G10 = addrspace(3) global [10 x i8] zeroinitializer, align 1
Index: llvm/test/Instrumentation/AddressSanitizer/AMDGPU/global_metadata_addrspacecasts.ll
===================================================================
--- llvm/test/Instrumentation/AddressSanitizer/AMDGPU/global_metadata_addrspacecasts.ll
+++ llvm/test/Instrumentation/AddressSanitizer/AMDGPU/global_metadata_addrspacecasts.ll
@@ -1,5 +1,5 @@
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 @g = addrspace(1) global [1 x i32] zeroinitializer, align 4
Index: llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_global_address_space.ll
===================================================================
--- llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_global_address_space.ll
+++ llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_global_address_space.ll
@@ -1,5 +1,5 @@
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 define protected amdgpu_kernel void @global_store(ptr addrspace(1) %p, i32 %i) sanitize_address {
Index: llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_generic_address_space.ll
===================================================================
--- llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_generic_address_space.ll
+++ llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_generic_address_space.ll
@@ -1,5 +1,5 @@
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 define protected amdgpu_kernel void @generic_store(ptr addrspace(1) %p, i32 %i) sanitize_address {
Index: llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_constant_address_space.ll
===================================================================
--- llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_constant_address_space.ll
+++ llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_constant_address_space.ll
@@ -1,5 +1,5 @@
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 @x = addrspace(4) global [2 x i32] zeroinitializer, align 4
Index: llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_do_not_instrument_scratch.ll
===================================================================
--- llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_do_not_instrument_scratch.ll
+++ llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_do_not_instrument_scratch.ll
@@ -1,5 +1,5 @@
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 ; Memory access to scratch are not instrumented
Index: llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_do_not_instrument_lds.ll
===================================================================
--- llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_do_not_instrument_lds.ll
+++ llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_do_not_instrument_lds.ll
@@ -1,5 +1,5 @@
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 ; Memory access to lds are not instrumented
Index: llvm/test/Instrumentation/AddressSanitizer/AMDGPU/adaptive_global_redzones.ll
===================================================================
--- llvm/test/Instrumentation/AddressSanitizer/AMDGPU/adaptive_global_redzones.ll
+++ llvm/test/Instrumentation/AddressSanitizer/AMDGPU/adaptive_global_redzones.ll
@@ -1,5 +1,5 @@
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 ; Here we check that the global redzone sizes grow with the object size
Index: llvm/test/Instrumentation/AddressSanitizer/AMDGPU/adaptive_constant_global_redzones.ll
===================================================================
--- llvm/test/Instrumentation/AddressSanitizer/AMDGPU/adaptive_constant_global_redzones.ll
+++ llvm/test/Instrumentation/AddressSanitizer/AMDGPU/adaptive_constant_global_redzones.ll
@@ -1,5 +1,5 @@
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 ; Here we check that the global redzone sizes grow with the object size
Index: llvm/test/CodeGen/AMDGPU/unroll.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/unroll.ll
+++ llvm/test/CodeGen/AMDGPU/unroll.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -mtriple=amdgcn-- -passes='loop-unroll,simplifycfg,sroa' %s -S -o - | FileCheck %s
 ; RUN: opt -mtriple=r600-- -passes='loop-unroll,simplifycfg,sroa' %s -S -o - | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; This test contains a simple loop that initializes an array declared in
 ; private memory.  We want to make sure these kinds of loops are always
Index: llvm/test/CodeGen/AMDGPU/sgpr-copy-local-cse.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/sgpr-copy-local-cse.ll
+++ llvm/test/CodeGen/AMDGPU/sgpr-copy-local-cse.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-machineinstrs -o - %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 ; CHECK-LABEL: {{^}}t0:
Index: llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll
+++ llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-select.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -passes=amdgpu-promote-alloca < %s | FileCheck %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; CHECK-LABEL: @lds_promoted_alloca_select_invalid_pointer_operand(
 ; CHECK: %alloca = alloca i32
Index: llvm/test/CodeGen/AMDGPU/promote-alloca-lifetime.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/promote-alloca-lifetime.ll
+++ llvm/test/CodeGen/AMDGPU/promote-alloca-lifetime.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -passes=amdgpu-promote-alloca %s | FileCheck -check-prefix=OPT %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 declare void @llvm.lifetime.start.p5(i64, ptr addrspace(5) nocapture) #0
 declare void @llvm.lifetime.end.p5(i64, ptr addrspace(5) nocapture) #0
Index: llvm/test/CodeGen/AMDGPU/nullptr.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/nullptr.ll
+++ llvm/test/CodeGen/AMDGPU/nullptr.ll
@@ -29,9 +29,10 @@
 ; R600-NEXT: .long 0
 @nullptr6 = global ptr addrspace(6) addrspacecast (ptr null to ptr addrspace(6))
 
-; CHECK-LABEL: nullptr7:
-; R600-NEXT: .long 0
-@nullptr7 = global ptr addrspace(7) addrspacecast (ptr null to ptr addrspace(7))
+; FIXME: AsmPrinter can't handle 128-bit constants
+; FIXME-LABEL: nullptr7:
+; FIXME-R600-NEXT: .long 0
+; FIXME @nullptr7 = global ptr addrspace(7) addrspacecast (ptr null to ptr addrspace(7))
 
 ; CHECK-LABEL: nullptr8:
 ; R600-NEXT: .long 0
Index: llvm/test/CodeGen/AMDGPU/nullptr-addrspace-7-tmp.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/nullptr-addrspace-7-tmp.ll
@@ -0,0 +1,12 @@
+; XFAIL: *
+; RUN: llc < %s -march=amdgcn -mtriple=amdgcn-- -verify-machineinstrs | FileCheck -check-prefixes=CHECK,GCN %s
+; RUN: llc < %s -march=r600 -mtriple=r600-- -verify-machineinstrs | FileCheck -check-prefixes=CHECK,R600 %s
+
+; This is a temporary XFAIL: the assembly printer currently cannot handle
+; lowerConstant() returning a value wider than 8 bytes.
+
+; CHECK-LABEL: nullptr7:
+; The exact form of the GCN output depends on how the printer gets fixed.
+; GCN-NEXT: .zeroes 4
+; R600-NEXT: .long 0
+@nullptr7 = global ptr addrspace(7) addrspacecast (ptr null to ptr addrspace(7))
Index: llvm/test/CodeGen/AMDGPU/noop-shader-O0.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/noop-shader-O0.ll
+++ llvm/test/CodeGen/AMDGPU/noop-shader-O0.ll
@@ -6,7 +6,7 @@
 ; Confirm registers reserved in SIMachineFunctionInfo are those expected during
 ; lowering, even when e.g. spilling is required due to being at OptNone.
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 target triple = "amdgcn-amd-amdpal"
 
 define amdgpu_vs void @noop_vs() {
Index: llvm/test/CodeGen/AMDGPU/mdt-preserving-crash.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/mdt-preserving-crash.ll
+++ llvm/test/CodeGen/AMDGPU/mdt-preserving-crash.ll
@@ -1,5 +1,5 @@
 ; RUN: llc < %s
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
 @_RSENC_gDcd_______________________________ = external protected addrspace(1) externally_initialized global [4096 x i8], align 16
Index: llvm/test/CodeGen/AMDGPU/loop-idiom.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/loop-idiom.ll
+++ llvm/test/CodeGen/AMDGPU/loop-idiom.ll
@@ -2,7 +2,7 @@
 ; RUN: opt -passes=loop-idiom -S < %s -march=amdgcn -mcpu=tahiti -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
 ; RUN: opt -passes=loop-idiom -S < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; Make sure loop-idiom doesn't create memcpy or memset.  There are no library
 ; implementations of these for R600.
Index: llvm/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address.ll
+++ llvm/test/CodeGen/AMDGPU/force-alwaysinline-lds-global-address.ll
@@ -3,7 +3,7 @@
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -amdgpu-stress-function-calls -passes=amdgpu-always-inline -amdgpu-enable-lower-module-lds=false %s | FileCheck --check-prefix=ALL %s
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -amdgpu-stress-function-calls -passes=amdgpu-always-inline -amdgpu-enable-lower-module-lds=false %s | FileCheck --check-prefix=ALL %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 @lds0 = addrspace(3) global i32 undef, align 4
 @lds1 = addrspace(3) global [512 x i32] undef, align 4
Index: llvm/test/CodeGen/AMDGPU/cgp-addressing-modes.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/cgp-addressing-modes.ll
+++ llvm/test/CodeGen/AMDGPU/cgp-addressing-modes.ll
@@ -7,7 +7,7 @@
 ; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-scalarize-global-loads=false -mattr=-promote-alloca -amdgpu-sroa=0 < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=SICIVI %s
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-promote-alloca -amdgpu-scalarize-global-loads=false -amdgpu-sroa=0 < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 ; OPT-LABEL: @test_sink_global_small_offset_i32(
 ; OPT-CI-NOT: getelementptr i32, ptr addrspace(1) %in
Index: llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
+++ llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
@@ -2,7 +2,7 @@
 ; RUN: opt -mtriple=amdgcn-unknown-amdhsa -S -amdgpu-annotate-kernel-features < %s | FileCheck -check-prefixes=HSA,AKF_HSA %s
 ; RUN: opt -mtriple=amdgcn-unknown-amdhsa -S -amdgpu-attributor < %s | FileCheck -check-prefixes=HSA,ATTRIBUTOR_HSA %s
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 declare i32 @llvm.amdgcn.workgroup.id.x() #0
 declare i32 @llvm.amdgcn.workgroup.id.y() #0
Index: llvm/test/CodeGen/AMDGPU/addrspacecast-captured.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/addrspacecast-captured.ll
+++ llvm/test/CodeGen/AMDGPU/addrspacecast-captured.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -passes=amdgpu-promote-alloca < %s | FileCheck %s
 ; Nothing should be done if the addrspacecast is captured.
 
-target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
 declare void @consume_ptr2int(i32) #0
 
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll
@@ -5,14 +5,16 @@
 define ptr addrspace(7) @no_auto_constfold_gep() {
   ; CHECK-LABEL: name: no_auto_constfold_gep
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p7) = G_CONSTANT i64 0
-  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 123
-  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p7) = G_PTR_ADD [[C]], [[C1]](s64)
-  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[PTR_ADD]](p7)
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p7) = G_CONSTANT i128 0
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 123
+  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p7) = G_PTR_ADD [[C]], [[C1]](s128)
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[PTR_ADD]](p7)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
-  %gep = getelementptr i8, ptr addrspace(7) null, i64 123
+  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  %gep = getelementptr i8, ptr addrspace(7) null, i32 123
   ret ptr addrspace(7) %gep
 }
 
@@ -20,18 +22,23 @@
 define <2 x ptr addrspace(7)> @no_auto_constfold_gep_vector() {
   ; CHECK-LABEL: name: no_auto_constfold_gep_vector
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p7) = G_CONSTANT i64 0
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p7) = G_CONSTANT i128 0
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p7>) = G_BUILD_VECTOR [[C]](p7), [[C]](p7)
-  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 123
-  ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
-  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(<2 x p7>) = G_PTR_ADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]](<2 x s64>)
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 123
+  ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32)
+  ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(<2 x s128>) = G_SEXT [[BUILD_VECTOR1]](<2 x s32>)
+  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(<2 x p7>) = G_PTR_ADD [[BUILD_VECTOR]], [[SEXT]](<2 x s128>)
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x p7>) = COPY [[PTR_ADD]](<2 x p7>)
-  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x p7>)
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x p7>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
-  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
-  %gep = getelementptr i8, <2 x ptr addrspace(7)> zeroinitializer, <2 x i64> <i64 123, i64 123>
+  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+  %gep = getelementptr i8, <2 x ptr addrspace(7)> zeroinitializer, <2 x i32> <i32 123, i32 123>
   ret <2 x ptr addrspace(7)> %gep
 }
Index: llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -506,9 +506,10 @@
   }
 
   // 32-bit private, local, and region pointers. 64-bit global, constant and
-  // flat, non-integral buffer fat pointers.
+  // flat pointers. 128-bit non-integral buffer fat pointers that use a
+  // 32-bit index.
   return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
-         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+         "-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
          "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"
          "-ni:7";
 }
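
For reference, a minimal standalone sketch (not part of this diff) of what the new p7:128:128:128:32 stanza means when the string above is queried through llvm::DataLayout: addrspace(7) pointers are 128 bits wide with 128-bit ABI/preferred alignment, while their index (GEP offset) width is 32 bits, which is why the GlobalISel checks earlier in this diff switch to four 32-bit registers and i32 offsets.

  #include "llvm/IR/DataLayout.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  int main() {
    // The GCN datalayout string returned above, including the new p7 stanza.
    DataLayout DL("e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32"
                  "-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32"
                  "-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024"
                  "-v2048:2048-n32:64-S32-A5-G1-ni:7");
    outs() << DL.getPointerSizeInBits(7) << "\n";            // 128 (pointer size)
    outs() << DL.getIndexSizeInBits(7) << "\n";              // 32 (GEP offset width)
    outs() << DL.getPointerABIAlignment(7).value() << "\n";  // 16 bytes, i.e. 128 bits
    return 0;
  }
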
Index: llvm/lib/IR/AutoUpgrade.cpp
===================================================================
--- llvm/lib/IR/AutoUpgrade.cpp
+++ llvm/lib/IR/AutoUpgrade.cpp
@@ -4900,8 +4900,16 @@
   // For AMDGPU we upgrade older DataLayouts to include the default globals
   // address space of 1.
   if (T.isAMDGPU() && !DL.contains("-G") && !DL.startswith("G")) {
+    // Also fold in the new address space 7 stanza for GCN targets.
+    if (T.isAMDGCN() && !DL.contains("-p7") && !DL.startswith("p7"))
+      return DL.empty() ? std::string("G1-p7:128:128:128:32")
+                        : (DL + "-G1-p7:128:128:128:32").str();
     return DL.empty() ? std::string("G1") : (DL + "-G1").str();
   }
+  // For AMDGCN, upgrade data layouts to define the size of address space 7.
+  if (T.isAMDGCN() && !DL.contains("-p7") && !DL.starts_with("p7"))
+    return DL.empty() ? std::string("p7:128:128:128:32")
+                      : (DL + "-p7:128:128:128:32").str();
 
   if (T.isRISCV64()) {
     // Make i32 a native type for 64-bit RISC-V.
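
For context, a rough sketch (not part of this diff) of how the upgrade path above is expected to behave, using the UpgradeDataLayoutString() entry point declared in llvm/IR/AutoUpgrade.h; the input layout string here is made up for illustration.

  #include "llvm/ADT/StringRef.h"
  #include "llvm/IR/AutoUpgrade.h"
  #include "llvm/Support/raw_ostream.h"

  int main() {
    // A hypothetical pre-G1, pre-p7 module datalayout for an amdgcn target.
    llvm::StringRef Old = "e-p:64:64-p5:32:32-A5";
    std::string Upgraded =
        llvm::UpgradeDataLayoutString(Old, "amdgcn-amd-amdhsa");
    // With the change above this should append both missing stanzas, giving
    // "e-p:64:64-p5:32:32-A5-G1-p7:128:128:128:32"; a layout that already
    // spells out both -G and -p7 should be left untouched.
    llvm::outs() << Upgraded << "\n";
    return 0;
  }
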
Index: clang/test/CodeGenOpenCL/amdgpu-env-amdgcn.cl
===================================================================
--- clang/test/CodeGenOpenCL/amdgpu-env-amdgcn.cl
+++ clang/test/CodeGenOpenCL/amdgpu-env-amdgcn.cl
@@ -1,5 +1,5 @@
 // RUN: %clang_cc1 %s -O0 -triple amdgcn -emit-llvm -o - | FileCheck %s
 // RUN: %clang_cc1 %s -O0 -triple amdgcn---opencl -emit-llvm -o - | FileCheck %s
 
-// CHECK: target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+// CHECK: target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 void foo(void) {}
Index: clang/test/CodeGen/target-data.c
===================================================================
--- clang/test/CodeGen/target-data.c
+++ clang/test/CodeGen/target-data.c
@@ -176,12 +176,12 @@
 
 // RUN: %clang_cc1 -triple amdgcn-unknown -target-cpu hawaii -o - -emit-llvm %s \
 // RUN: | FileCheck %s -check-prefix=R600SI
-// R600SI: target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+// R600SI: target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 
 // Test default -target-cpu
 // RUN: %clang_cc1 -triple amdgcn-unknown -o - -emit-llvm %s \
 // RUN: | FileCheck %s -check-prefix=R600SIDefault
-// R600SIDefault: target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+// R600SIDefault: target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 
 // RUN: %clang_cc1 -triple arm64-unknown -o - -emit-llvm %s | \
 // RUN: FileCheck %s -check-prefix=AARCH64
Index: clang/lib/Basic/Targets/AMDGPU.cpp
===================================================================
--- clang/lib/Basic/Targets/AMDGPU.cpp
+++ clang/lib/Basic/Targets/AMDGPU.cpp
@@ -33,7 +33,7 @@
 
 static const char *const DataLayoutStringAMDGCN =
     "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
-    "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+    "-p7:128:128:128:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
     "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"
     "-ni:7";
 