kschwarz created this revision. kschwarz added reviewers: rjmccall, gchatelet, aemerson. Herald added subscribers: cfe-commits, jfb. Herald added a project: clang.
The IR builder hard-coded the type of the `len` argument of the memory function intrinsics to i64 for several builder functions. During instruction selection of these intrinsics to the corresponding C library functions, SelectionDAG ignores the type of the len argument and uses the target's preferred type. GlobalISel, however, translates these calls using the actual types of the intrinsic, which will miscompile on 32-bit architectures. Using the preferred type when creating the intrinsic call in the first place fixes this issue. Repository: rG LLVM Github Monorepo https://reviews.llvm.org/D76283 Files: clang/test/CodeGen/c11atomics-ios.c clang/test/CodeGen/c11atomics.c clang/test/CodeGen/x86-atomic-long_double.c clang/test/CodeGenCXX/pod-member-memcpys.cpp clang/test/CodeGenCXX/pr20897.cpp llvm/include/llvm/IR/IRBuilder.h llvm/test/Transforms/MemCpyOpt/form-memset.ll
Index: llvm/test/Transforms/MemCpyOpt/form-memset.ll =================================================================== --- llvm/test/Transforms/MemCpyOpt/form-memset.ll +++ llvm/test/Transforms/MemCpyOpt/form-memset.ll @@ -50,7 +50,7 @@ ret void ; CHECK-LABEL: @test1( ; CHECK-NOT: store -; CHECK: call void @llvm.memset.p0i8.i64 +; CHECK: call void @llvm.memset.p0i8.i32 ; CHECK-NOT: store ; CHECK: ret } @@ -152,11 +152,11 @@ ; CHECK-LABEL: @test2( ; CHECK-NOT: store -; CHECK: call void @llvm.memset.p0i8.i64(i8* align 1 %tmp41, i8 -1, i64 8, i1 false) +; CHECK: call void @llvm.memset.p0i8.i32(i8* align 1 %tmp41, i8 -1, i32 8, i1 false) ; CHECK-NOT: store -; CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 32, i1 false) +; CHECK: call void @llvm.memset.p0i8.i32(i8* align 8 %0, i8 0, i32 32, i1 false) ; CHECK-NOT: store -; CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %1, i8 0, i64 32, i1 false) +; CHECK: call void @llvm.memset.p0i8.i32(i8* align 8 %1, i8 0, i32 32, i1 false) ; CHECK-NOT: store ; CHECK: ret } @@ -175,7 +175,7 @@ ret void ; CHECK-LABEL: @test3( ; CHECK-NOT: store -; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 0, i64 15, i1 false) +; CHECK: call void @llvm.memset.p0i8.i32(i8* align 4 %1, i8 0, i32 15, i1 false) } ; store followed by memset, different offset scenario @@ -188,7 +188,7 @@ ret void ; CHECK-LABEL: @test4( ; CHECK-NOT: store -; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 0, i64 15, i1 false) +; CHECK: call void @llvm.memset.p0i8.i32(i8* align 4 %1, i8 0, i32 15, i1 false) } declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind @@ -204,7 +204,7 @@ ret void ; CHECK-LABEL: @test5( ; CHECK-NOT: store -; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 0, i64 15, i1 false) +; CHECK: call void @llvm.memset.p0i8.i32(i8* align 4 %1, i8 0, i32 15, i1 false) } ;; Memset followed by memset. 
@@ -217,7 +217,7 @@ tail call void @llvm.memset.p0i8.i64(i8* %1, i8 0, i64 12, i1 false) ret void ; CHECK-LABEL: @test6( -; CHECK: call void @llvm.memset.p0i8.i64(i8* %2, i8 0, i64 24, i1 false) +; CHECK: call void @llvm.memset.p0i8.i32(i8* %2, i8 0, i32 24, i1 false) } ; More aggressive heuristic @@ -233,7 +233,7 @@ %4 = getelementptr inbounds i32, i32* %c, i32 4 store i32 -1, i32* %4, align 4 ; CHECK-LABEL: @test7( -; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %5, i8 -1, i64 20, i1 false) +; CHECK: call void @llvm.memset.p0i8.i32(i8* align 4 %5, i8 -1, i32 20, i1 false) ret void } @@ -270,7 +270,7 @@ store i8 -1, i8* getelementptr (i8, i8* bitcast ([16 x i64]* @test9buf to i8*), i64 15), align 1 ret void ; CHECK-LABEL: @test9( -; CHECK: call void @llvm.memset.p0i8.i64(i8* align 16 bitcast ([16 x i64]* @test9buf to i8*), i8 -1, i64 16, i1 false) +; CHECK: call void @llvm.memset.p0i8.i32(i8* align 16 bitcast ([16 x i64]* @test9buf to i8*), i8 -1, i32 16, i1 false) } ; PR19092 @@ -280,7 +280,7 @@ ret void ; CHECK-LABEL: @test10( ; CHECK-NOT: memset -; CHECK: call void @llvm.memset.p0i8.i64(i8* %P, i8 0, i64 42, i1 false) +; CHECK: call void @llvm.memset.p0i8.i32(i8* %P, i8 0, i32 42, i1 false) ; CHECK-NOT: memset ; CHECK: ret void } @@ -297,7 +297,7 @@ ret void ; CHECK-LABEL: @test11( ; CHECK-NOT: store -; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 1, i64 23, i1 false) +; CHECK: call void @llvm.memset.p0i8.i32(i8* align 4 %1, i8 1, i32 23, i1 false) } ; Alignment should be preserved when there is a store with default align @@ -310,5 +310,5 @@ ret void ; CHECK-LABEL: @test12( ; CHECK-NOT: store -; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 0, i64 15, i1 false) +; CHECK: call void @llvm.memset.p0i8.i32(i8* align 4 %1, i8 0, i32 15, i1 false) } Index: llvm/include/llvm/IR/IRBuilder.h =================================================================== --- llvm/include/llvm/IR/IRBuilder.h +++ llvm/include/llvm/IR/IRBuilder.h @@ 
-424,6 +424,16 @@ return ConstantInt::get(Context, AI); } + ConstantInt *getIntPtrSize(Value *Ptr, uint64_t Size) { + assert(BB && "Must have a basic block to retrieve the module!"); + + Module *M = BB->getParent()->getParent(); + auto *PtrType = Ptr->getType(); + unsigned PtrSize = M->getDataLayout().getPointerSizeInBits( + PtrType->getPointerAddressSpace()); + return getIntN(PtrSize, Size); + } + //===--------------------------------------------------------------------===// // Type creation methods //===--------------------------------------------------------------------===// @@ -505,7 +515,7 @@ MaybeAlign Align, bool isVolatile = false, MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr) { - return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile, + return CreateMemSet(Ptr, Val, getIntPtrSize(Ptr, Size), Align, isVolatile, TBAATag, ScopeTag, NoAliasTag); } @@ -580,8 +590,8 @@ MDNode *NoAliasTag = nullptr), "Use the version that takes MaybeAlign instead") { return CreateMemCpy(Dst, MaybeAlign(DstAlign), Src, MaybeAlign(SrcAlign), - getInt64(Size), isVolatile, TBAATag, TBAAStructTag, - ScopeTag, NoAliasTag); + getIntPtrSize(Dst, Size), isVolatile, TBAATag, + TBAAStructTag, ScopeTag, NoAliasTag); } CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, @@ -590,7 +600,7 @@ MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr) { - return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size), + return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getIntPtrSize(Dst, Size), isVolatile, TBAATag, TBAAStructTag, ScopeTag, NoAliasTag); } @@ -674,15 +684,15 @@ MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr), "Use the version that takes MaybeAlign") { return CreateMemMove(Dst, MaybeAlign(DstAlign), Src, MaybeAlign(SrcAlign), - getInt64(Size), isVolatile, TBAATag, ScopeTag, - NoAliasTag); + getIntPtrSize(Dst, Size), isVolatile, TBAATag, + ScopeTag, NoAliasTag); } CallInst 
*CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile = false, MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr) { - return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size), + return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getIntPtrSize(Dst, Size), isVolatile, TBAATag, ScopeTag, NoAliasTag); } /// FIXME: Remove this function once transition to Align is over. Index: clang/test/CodeGenCXX/pr20897.cpp =================================================================== --- clang/test/CodeGenCXX/pr20897.cpp +++ clang/test/CodeGenCXX/pr20897.cpp @@ -9,7 +9,7 @@ // CHECK: %[[dest_a_gep:.*]] = getelementptr inbounds %struct.Derived, %struct.Derived* %[[this]], i32 0, i32 1 // CHECK-NEXT: %[[src_load:.*]] = load %struct.Derived*, %struct.Derived** {{.*}} // CHECK-NEXT: %[[src_a_gep:.*]] = getelementptr inbounds %struct.Derived, %struct.Derived* %[[src_load:.*]], i32 0, i32 1 -// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[dest_a_gep]], i8* align 4 %[[src_a_gep]], i64 1, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %[[dest_a_gep]], i8* align 4 %[[src_a_gep]], i32 1, i1 false) // CHECK-NEXT: %[[dest_this:.*]] = load %struct.Derived*, %struct.Derived** %[[retval]] // CHECK-NEXT: ret %struct.Derived* %[[dest_this]] bool a : 1; Index: clang/test/CodeGenCXX/pod-member-memcpys.cpp =================================================================== --- clang/test/CodeGenCXX/pod-member-memcpys.cpp +++ clang/test/CodeGenCXX/pod-member-memcpys.cpp @@ -203,7 +203,7 @@ CALL_CC(BitfieldMember2) // BitfieldMember2 copy-constructor: // CHECK-2-LABEL: define linkonce_odr void @_ZN15BitfieldMember2C2ERKS_(%struct.BitfieldMember2* %this, %struct.BitfieldMember2* dereferenceable({{[0-9]+}}) %0) -// CHECK-2: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}} align 4 {{.*}} align 4 {{.*}}i64 16, i1 false) +// CHECK-2: call void 
@llvm.memcpy.p0i8.p0i8.i32({{.*}} align 4 {{.*}} align 4 {{.*}}i32 16, i1 false) // CHECK-2: call void @_ZN6NonPODC1ERKS_ // CHECK-2: ret void Index: clang/test/CodeGen/x86-atomic-long_double.c =================================================================== --- clang/test/CodeGen/x86-atomic-long_double.c +++ clang/test/CodeGen/x86-atomic-long_double.c @@ -46,10 +46,10 @@ // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] // CHECK32: [[INC_VALUE:%.+]] = fadd x86_fp80 [[OLD_VALUE]], // CHECK32: [[OLD_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 4 // CHECK32: [[DESIRED_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 [[INC_VALUE]], x86_fp80* [[DESIRED_VALUE_ADDR]], align 4 // CHECK32: [[OBJ:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8* @@ -108,10 +108,10 @@ // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[ORIG_LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] // CHECK32: [[DEC_VALUE:%.+]] = fadd x86_fp80 [[OLD_VALUE]], // CHECK32: [[OLD_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 
[[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 4 // CHECK32: [[DESIRED_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 [[DEC_VALUE]], x86_fp80* [[DESIRED_VALUE_ADDR]], align 4 // CHECK32: [[OBJ:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8* @@ -177,10 +177,10 @@ // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] // CHECK32: [[INC_VALUE:%.+]] = fsub x86_fp80 [[OLD_VALUE]], // CHECK32: [[OLD_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 4 // CHECK32: [[DESIRED_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 [[INC_VALUE]], x86_fp80* [[DESIRED_VALUE_ADDR]], align 4 // CHECK32: [[OBJ:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8* @@ -213,7 +213,7 @@ // CHECK32: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 4 // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 4 // CHECK32: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR:%.+]] to i8* - // CHECK32: call void 
@llvm.memset.p0i8.i64(i8* align 4 [[STORE_TEMP_VOID_PTR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[STORE_TEMP_VOID_PTR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 {{.+}}, x86_fp80* [[STORE_TEMP_PTR]], align 4 // CHECK32: [[ADDR_VOID:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR]] to i8* @@ -281,10 +281,10 @@ // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] // CHECK32: [[INC_VALUE:%.+]] = fadd x86_fp80 [[OLD_VALUE]], // CHECK32: [[OLD_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 4 // CHECK32: [[DESIRED_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 [[INC_VALUE]], x86_fp80* [[DESIRED_VALUE_ADDR]], align 4 // CHECK32: [[OBJ:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8* @@ -342,10 +342,10 @@ // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[ORIG_LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] // CHECK32: [[DEC_VALUE:%.+]] = fadd x86_fp80 [[OLD_VALUE]], // CHECK32: [[OLD_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 
[[OLD_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 4 // CHECK32: [[DESIRED_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 [[DEC_VALUE]], x86_fp80* [[DESIRED_VALUE_ADDR]], align 4 // CHECK32: [[OBJ:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8* @@ -409,10 +409,10 @@ // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ] // CHECK32: [[INC_VALUE:%.+]] = fsub x86_fp80 [[OLD_VALUE]], // CHECK32: [[OLD_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 4 // CHECK32: [[DESIRED_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 [[INC_VALUE]], x86_fp80* [[DESIRED_VALUE_ADDR]], align 4 // CHECK32: [[OBJ:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8* @@ -445,7 +445,7 @@ // CHECK32: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 4 // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 4 // CHECK32: [[STORE_TEMP_VOID_PTR:%.+]] = 
bitcast x86_fp80* [[STORE_TEMP_PTR:%.+]] to i8* - // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[STORE_TEMP_VOID_PTR]], i8 0, i64 12, i1 false) + // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[STORE_TEMP_VOID_PTR]], i8 0, i32 12, i1 false) // CHECK32: store x86_fp80 {{.+}}, x86_fp80* [[STORE_TEMP_PTR]], align 4 // CHECK32: [[ADDR_VOID:%.+]] = bitcast x86_fp80* [[ADDR]] to i8* // CHECK32: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR]] to i8* Index: clang/test/CodeGen/c11atomics.c =================================================================== --- clang/test/CodeGen/c11atomics.c +++ clang/test/CodeGen/c11atomics.c @@ -307,7 +307,7 @@ // CHECK-NEXT: [[P:%.*]] = load [[APS]]*, [[APS]]** [[FP]] // CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[P]] to i8* -// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[T0]], i8 0, i64 8, i1 false) +// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 8 [[T0]], i8 0, i32 8, i1 false) // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[P]], i32 0, i32 0 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0 // CHECK-NEXT: store i16 1, i16* [[T1]], align 8 @@ -402,7 +402,7 @@ // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64* // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8* // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8* - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false) + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i32 6, i1 false) // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64* // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8* // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 2 @@ -427,7 +427,7 @@ // CHECK: [[ADDR64:%.*]] = bitcast 
{ %struct.PS, [2 x i8] }* [[ADDR]] to i64* // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8* // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8* - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false) + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i32 6, i1 false) // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64* // CHECK: [[ATOMIC_RES64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_RES]] to i64* // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8* @@ -461,11 +461,11 @@ // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64* // CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED]] to i8* // CHECK: [[DESIRED8:%.*]] = bitcast %struct.PS* [[DESIRED]]to i8* - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i64 6, i1 false) + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i32 6, i1 false) // CHECK: [[ATOMIC_DESIRED64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED]] to i64* // CHECK: [[ATOMIC_NEW8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i8* // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8* - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false) + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i32 6, i1 false) // CHECK: [[ATOMIC_NEW64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i64* // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8* // CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast i64* [[ATOMIC_DESIRED64]] to i8* Index: 
clang/test/CodeGen/c11atomics-ios.c =================================================================== --- clang/test/CodeGen/c11atomics-ios.c +++ clang/test/CodeGen/c11atomics-ios.c @@ -154,7 +154,7 @@ // CHECK-NEXT: [[P:%.*]] = load [[APS]]*, [[APS]]** [[FP]] // CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[P]] to i8* -// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[T0]], i8 0, i64 8, i1 false) +// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 8 [[T0]], i8 0, i32 8, i1 false) // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[P]], i32 0, i32 0 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0 // CHECK-NEXT: store i16 1, i16* [[T1]], align 8 @@ -236,7 +236,7 @@ // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64* // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8* // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8* - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false) + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i32 6, i1 false) // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64* // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 8 // CHECK: store atomic i64 [[VAL64]], i64* [[ADDR64]] seq_cst, align 8 @@ -261,7 +261,7 @@ // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64* // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8* // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8* - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false) + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i32 6, i1 false) // 
CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64* // CHECK: [[ATOMIC_RES64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_RES]] to i64* // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 8 @@ -295,11 +295,11 @@ // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64* // CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED:%.*]] to i8* // CHECK: [[DESIRED8:%.*]] = bitcast %struct.PS* [[DESIRED]] to i8* - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i64 6, i1 false) + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i32 6, i1 false) // CHECK: [[ATOMIC_DESIRED64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED:%.*]] to i64* // CHECK: [[ATOMIC_NEW8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i8* // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8* - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false) + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i32 6, i1 false) // CHECK: [[ATOMIC_NEW64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i64* // CHECK: [[ATOMIC_DESIRED_VAL64:%.*]] = load i64, i64* [[ATOMIC_DESIRED64]], align 8 // CHECK: [[ATOMIC_NEW_VAL64:%.*]] = load i64, i64* [[ATOMIC_NEW64]], align 8
_______________________________________________ cfe-commits mailing list cfe-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits