https://github.com/davemgreen updated https://github.com/llvm/llvm-project/pull/135064
>From 9a56ee32712c213b0fa06257bda9c2f31ec44416 Mon Sep 17 00:00:00 2001 From: David Green <david.gr...@arm.com> Date: Thu, 15 May 2025 20:36:44 +0100 Subject: [PATCH] [AArch64] Change the coercion type of structs with pointer members. The aim here is to avoid a ptrtoint->inttoptr round-trip through the function argument whilst keeping the calling convention the same. Given a struct which is <= 128bits in size, which can only contain either 1 or 2 pointers, we convert to a ptr or [2 x ptr] as opposed to the old coercion that uses i64 or [2 x i64]. --- clang/lib/CodeGen/Targets/AArch64.cpp | 28 +++++++++++ .../AArch64/struct-coerce-using-ptr.cpp | 50 +++++++++---------- clang/test/CodeGen/ptrauth-in-c-struct.c | 2 +- .../CodeGenCXX/ptrauth-qualifier-struct.cpp | 2 +- clang/test/CodeGenCXX/trivial_abi.cpp | 13 ++--- 5 files changed, 59 insertions(+), 36 deletions(-) diff --git a/clang/lib/CodeGen/Targets/AArch64.cpp b/clang/lib/CodeGen/Targets/AArch64.cpp index f098f09ebf581..5cde055341f2d 100644 --- a/clang/lib/CodeGen/Targets/AArch64.cpp +++ b/clang/lib/CodeGen/Targets/AArch64.cpp @@ -486,9 +486,37 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn, } Size = llvm::alignTo(Size, Alignment); + // If the aggregate is made up of pointers, use an array of pointers for the + // coerced type. This prevents having to convert ptrtoint->inttoptr through + // the call, allowing alias analysis to produce better code. 
+ auto ContainsOnlyPointers = [&](const auto &Self, QualType Ty) { + if (isEmptyRecord(getContext(), Ty, true)) + return false; + const RecordType *RT = Ty->getAs<RecordType>(); + if (!RT) + return false; + const RecordDecl *RD = RT->getDecl(); + if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { + for (const auto &I : CXXRD->bases()) + if (!Self(Self, I.getType())) + return false; + } + return all_of(RD->fields(), [&](FieldDecl *FD) { + QualType FDTy = FD->getType(); + if (FDTy->isArrayType()) + FDTy = getContext().getBaseElementType(FDTy); + return (FDTy->isPointerOrReferenceType() && + getContext().getTypeSize(FDTy) == 64) || + Self(Self, FDTy); + }); + }; + // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. // For aggregates with 16-byte alignment, we use i128. llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment); + if ((Size == 64 || Size == 128) && Alignment == 64 && + ContainsOnlyPointers(ContainsOnlyPointers, Ty)) + BaseTy = llvm::PointerType::getUnqual(getVMContext()); return ABIArgInfo::getDirect( Size == Alignment ? 
BaseTy : llvm::ArrayType::get(BaseTy, Size / Alignment)); diff --git a/clang/test/CodeGen/AArch64/struct-coerce-using-ptr.cpp b/clang/test/CodeGen/AArch64/struct-coerce-using-ptr.cpp index f7a44a5999887..a41f315340b57 100644 --- a/clang/test/CodeGen/AArch64/struct-coerce-using-ptr.cpp +++ b/clang/test/CodeGen/AArch64/struct-coerce-using-ptr.cpp @@ -29,12 +29,11 @@ struct Sp { int *x; }; // CHECK-A64-LABEL: define dso_local void @_Z2Tp2Sp( -// CHECK-A64-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] { +// CHECK-A64-SAME: ptr [[S_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SP:%.*]], align 8 // CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SP]], ptr [[S]], i32 0, i32 0 -// CHECK-A64-NEXT: [[COERCE_VAL_IP:%.*]] = inttoptr i64 [[S_COERCE]] to ptr -// CHECK-A64-NEXT: store ptr [[COERCE_VAL_IP]], ptr [[COERCE_DIVE]], align 8 +// CHECK-A64-NEXT: store ptr [[S_COERCE]], ptr [[COERCE_DIVE]], align 8 // CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SP]], ptr [[S]], i32 0, i32 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8 // CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4 @@ -58,10 +57,10 @@ struct Spp { int *x, *y; }; // CHECK-A64-LABEL: define dso_local void @_Z3Tpp3Spp( -// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] { +// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPP:%.*]], align 8 -// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 8 +// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[S]], align 8 // CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP]], ptr [[S]], i32 0, i32 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8 // CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4 @@ -135,10 +134,10 @@ struct Srp { int &x, *y; }; // CHECK-A64-LABEL: define dso_local void @_Z3Trp3Srp( -// 
CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] { +// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SRP:%.*]], align 8 -// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 8 +// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[S]], align 8 // CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SRP]], ptr [[S]], i32 0, i32 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8 // CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4 @@ -160,10 +159,10 @@ struct __attribute__((__packed__)) Spp_packed { int *x, *y; }; // CHECK-A64-LABEL: define dso_local void @_Z10Tpp_packed10Spp_packed( -// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] { +// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPP_PACKED:%.*]], align 1 -// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 1 +// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[S]], align 1 // CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_PACKED]], ptr [[S]], i32 0, i32 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 1 // CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4 @@ -185,11 +184,11 @@ struct __attribute__((__packed__)) Spp_superpacked { Spp_packed x; }; // CHECK-A64-LABEL: define dso_local void @_Z15Tpp_superpacked15Spp_superpacked( -// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] { +// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPP_SUPERPACKED:%.*]], align 1 // CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_SUPERPACKED]], ptr [[S]], i32 0, i32 0 -// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[COERCE_DIVE]], align 1 +// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[COERCE_DIVE]], align 1 
// CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_SUPERPACKED]], ptr [[S]], i32 0, i32 0 // CHECK-A64-NEXT: [[X1:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_PACKED:%.*]], ptr [[X]], i32 0, i32 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X1]], align 1 @@ -215,12 +214,11 @@ union Upp { long long *y; }; // CHECK-A64-LABEL: define dso_local void @_Z11Tupp_packed3Upp( -// CHECK-A64-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] { +// CHECK-A64-SAME: ptr [[S_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[S:%.*]] = alloca [[UNION_UPP:%.*]], align 8 // CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[UNION_UPP]], ptr [[S]], i32 0, i32 0 -// CHECK-A64-NEXT: [[COERCE_VAL_IP:%.*]] = inttoptr i64 [[S_COERCE]] to ptr -// CHECK-A64-NEXT: store ptr [[COERCE_VAL_IP]], ptr [[COERCE_DIVE]], align 8 +// CHECK-A64-NEXT: store ptr [[S_COERCE]], ptr [[COERCE_DIVE]], align 8 // CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[S]], align 8 // CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4 // CHECK-A64-NEXT: ret void @@ -326,10 +324,10 @@ struct SSpSp { struct Sp a, b; }; // CHECK-A64-LABEL: define dso_local void @_Z5TSpSp5SSpSp( -// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] { +// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SSPSP:%.*]], align 8 -// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 8 +// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[S]], align 8 // CHECK-A64-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPSP]], ptr [[S]], i32 0, i32 0 // CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SP:%.*]], ptr [[A]], i32 0, i32 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8 @@ -353,11 +351,11 @@ struct SSpp { Spp a; }; // CHECK-A64-LABEL: define dso_local void @_Z4TSpp4SSpp( -// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] { +// 
CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SSPP:%.*]], align 8 // CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPP]], ptr [[S]], i32 0, i32 0 -// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[COERCE_DIVE]], align 8 +// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[COERCE_DIVE]], align 8 // CHECK-A64-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SSPP]], ptr [[S]], i32 0, i32 0 // CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP:%.*]], ptr [[A]], i32 0, i32 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8 @@ -382,10 +380,10 @@ struct SSp : public Sp { int* b; }; // CHECK-A64-LABEL: define dso_local void @_Z3TSp3SSp( -// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] { +// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SSP:%.*]], align 8 -// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 8 +// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[S]], align 8 // CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SP:%.*]], ptr [[S]], i32 0, i32 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 8 // CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4 @@ -433,11 +431,11 @@ struct Spa { int* xs[1]; }; // CHECK-A64-LABEL: define dso_local void @_Z3Tpa3Spa( -// CHECK-A64-SAME: i64 [[S_COERCE:%.*]]) #[[ATTR0]] { +// CHECK-A64-SAME: ptr [[S_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPA:%.*]], align 8 // CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA]], ptr [[S]], i32 0, i32 0 -// CHECK-A64-NEXT: store i64 [[S_COERCE]], ptr [[COERCE_DIVE]], align 8 +// CHECK-A64-NEXT: store ptr [[S_COERCE]], ptr [[COERCE_DIVE]], align 8 // CHECK-A64-NEXT: [[XS:%.*]] = getelementptr 
inbounds nuw [[STRUCT_SPA]], ptr [[S]], i32 0, i32 0 // CHECK-A64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1 x ptr], ptr [[XS]], i64 0, i64 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 @@ -463,11 +461,11 @@ struct Spa2 { int* xs[2]; }; // CHECK-A64-LABEL: define dso_local void @_Z4Tpa24Spa2( -// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] { +// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPA2:%.*]], align 8 // CHECK-A64-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA2]], ptr [[S]], i32 0, i32 0 -// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[COERCE_DIVE]], align 8 +// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[COERCE_DIVE]], align 8 // CHECK-A64-NEXT: [[XS:%.*]] = getelementptr inbounds nuw [[STRUCT_SPA2]], ptr [[S]], i32 0, i32 0 // CHECK-A64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x ptr], ptr [[XS]], i64 0, i64 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 @@ -523,10 +521,10 @@ struct __attribute__((aligned(16))) Spp_align16 { int *x, *y; }; // CHECK-A64-LABEL: define dso_local void @_Z11Tpp_align1611Spp_align16( -// CHECK-A64-SAME: [2 x i64] [[S_COERCE:%.*]]) #[[ATTR0]] { +// CHECK-A64-SAME: [2 x ptr] [[S_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[S:%.*]] = alloca [[STRUCT_SPP_ALIGN16:%.*]], align 16 -// CHECK-A64-NEXT: store [2 x i64] [[S_COERCE]], ptr [[S]], align 16 +// CHECK-A64-NEXT: store [2 x ptr] [[S_COERCE]], ptr [[S]], align 16 // CHECK-A64-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_SPP_ALIGN16]], ptr [[S]], i32 0, i32 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X]], align 16 // CHECK-A64-NEXT: store i32 1, ptr [[TMP0]], align 4 diff --git a/clang/test/CodeGen/ptrauth-in-c-struct.c b/clang/test/CodeGen/ptrauth-in-c-struct.c index d415c18d0da58..2aec31ec3baf9 100644 --- 
a/clang/test/CodeGen/ptrauth-in-c-struct.c +++ b/clang/test/CodeGen/ptrauth-in-c-struct.c @@ -158,7 +158,7 @@ void test_copy_constructor_SI(SI *s) { SI t = *s; } -// CHECK: define void @test_parameter_SI(i64 %{{.*}}) +// CHECK: define void @test_parameter_SI(ptr %{{.*}}) // CHECK-NOT: call // CHECK: ret void diff --git a/clang/test/CodeGenCXX/ptrauth-qualifier-struct.cpp b/clang/test/CodeGenCXX/ptrauth-qualifier-struct.cpp index 7d6de50d926b5..daeea77774ec8 100644 --- a/clang/test/CodeGenCXX/ptrauth-qualifier-struct.cpp +++ b/clang/test/CodeGenCXX/ptrauth-qualifier-struct.cpp @@ -99,7 +99,7 @@ void testMoveAssignment(SA a) { t = static_cast<SA &&>(a); } -// CHECK: define {{.*}}void @_Z19testCopyConstructor2SI(i +// CHECK: define {{.*}}void @_Z19testCopyConstructor2SI( // CHECK: call void @llvm.memcpy.p0.p0.i64( void testCopyConstructor(SI a) { diff --git a/clang/test/CodeGenCXX/trivial_abi.cpp b/clang/test/CodeGenCXX/trivial_abi.cpp index 90054dbf37ae3..b8cc0d1cc6528 100644 --- a/clang/test/CodeGenCXX/trivial_abi.cpp +++ b/clang/test/CodeGenCXX/trivial_abi.cpp @@ -68,11 +68,10 @@ struct D0 : B0, B1 { Small D0::m0() { return {}; } -// CHECK: define{{.*}} void @_Z14testParamSmall5Small(i64 %[[A_COERCE:.*]]) +// CHECK: define{{.*}} void @_Z14testParamSmall5Small(ptr %[[A_COERCE:.*]]) // CHECK: %[[A:.*]] = alloca %[[STRUCT_SMALL]], align 8 // CHECK: %[[COERCE_DIVE:.*]] = getelementptr inbounds nuw %[[STRUCT_SMALL]], ptr %[[A]], i32 0, i32 0 -// CHECK: %[[COERCE_VAL_IP:.*]] = inttoptr i64 %[[A_COERCE]] to ptr -// CHECK: store ptr %[[COERCE_VAL_IP]], ptr %[[COERCE_DIVE]], align 8 +// CHECK: store ptr %[[A_COERCE]], ptr %[[COERCE_DIVE]], align 8 // CHECK: %[[CALL:.*]] = call noundef ptr @_ZN5SmallD1Ev(ptr {{[^,]*}} %[[A]]) // CHECK: ret void // CHECK: } @@ -101,8 +100,7 @@ Small testReturnSmall() { // CHECK: %[[CALL1:.*]] = call noundef ptr @_ZN5SmallC1ERKS_(ptr {{[^,]*}} %[[AGG_TMP]], ptr noundef nonnull align 8 dereferenceable(8) %[[T]]) // CHECK: %[[COERCE_DIVE:.*]] = 
getelementptr inbounds nuw %[[STRUCT_SMALL]], ptr %[[AGG_TMP]], i32 0, i32 0 // CHECK: %[[V0:.*]] = load ptr, ptr %[[COERCE_DIVE]], align 8 -// CHECK: %[[COERCE_VAL_PI:.*]] = ptrtoint ptr %[[V0]] to i64 -// CHECK: call void @_Z14testParamSmall5Small(i64 %[[COERCE_VAL_PI]]) +// CHECK: call void @_Z14testParamSmall5Small(ptr %[[V0]]) // CHECK: %[[CALL2:.*]] = call noundef ptr @_ZN5SmallD1Ev(ptr {{[^,]*}} %[[T]]) // CHECK: ret void // CHECK: } @@ -120,8 +118,7 @@ void testCallSmall0() { // CHECK: store ptr %[[COERCE_VAL_IP]], ptr %[[COERCE_DIVE]], align 8 // CHECK: %[[COERCE_DIVE1:.*]] = getelementptr inbounds nuw %[[STRUCT_SMALL]], ptr %[[AGG_TMP]], i32 0, i32 0 // CHECK: %[[V0:.*]] = load ptr, ptr %[[COERCE_DIVE1]], align 8 -// CHECK: %[[COERCE_VAL_PI:.*]] = ptrtoint ptr %[[V0]] to i64 -// CHECK: call void @_Z14testParamSmall5Small(i64 %[[COERCE_VAL_PI]]) +// CHECK: call void @_Z14testParamSmall5Small(ptr %[[V0]]) // CHECK: ret void // CHECK: } @@ -226,7 +223,7 @@ NonTrivial testReturnHasNonTrivial() { // CHECK: call noundef ptr @_ZN5SmallC1Ev(ptr {{[^,]*}} %[[AGG_TMP]]) // CHECK: invoke noundef ptr @_ZN5SmallC1Ev(ptr {{[^,]*}} %[[AGG_TMP1]]) -// CHECK: call void @_Z20calleeExceptionSmall5SmallS_(i64 %{{.*}}, i64 %{{.*}}) +// CHECK: call void @_Z20calleeExceptionSmall5SmallS_(ptr %{{.*}}, ptr %{{.*}}) // CHECK-NEXT: ret void // CHECK: landingpad { ptr, i32 } _______________________________________________ cfe-commits mailing list cfe-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits