Author: d0k
Date: Thu Sep 7 02:54:03 2017
New Revision: 312710

URL: http://llvm.org/viewvc/llvm-project?rev=312710&view=rev
Log:
Fixing incorrectly capitalised regexps.

Patch by Sam Allen!
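
For context, the bug being fixed: in the character class [0-9a-zA-z], the
range A-z is evaluated over character codes, so (in ASCII / the C locale) it
also matches the six punctuation characters [ \ ] ^ _ ` that sit between
'Z' (90) and 'a' (97). The checks could therefore capture text that is not a
valid identifier. A minimal standalone sketch of the difference using POSIX
<regex.h> (illustrative only, not part of the commit):

#include <regex.h>
#include <stdio.h>

int main(void) {
  regex_t bad, good;
  /* The typo'd class from the old tests vs. the corrected one. */
  regcomp(&bad,  "^[0-9a-zA-z]+$", REG_EXTENDED);
  regcomp(&good, "^[0-9a-zA-Z]+$", REG_EXTENDED);

  const char *s = "high64^1"; /* '^' is not valid in an LLVM SSA name */
  printf("[0-9a-zA-z]: %s\n",
         regexec(&bad,  s, 0, NULL, 0) == 0 ? "matches" : "no match");
  printf("[0-9a-zA-Z]: %s\n",
         regexec(&good, s, 0, NULL, 0) == 0 ? "matches" : "no match");

  regfree(&bad);
  regfree(&good);
  return 0;
}

The typo'd class accepts "high64^1" in full, while the corrected class
rejects it at the '^'.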

Modified:
    cfe/trunk/test/CodeGen/x86_32-xsave.c
    cfe/trunk/test/CodeGen/x86_64-xsave.c
    cfe/trunk/test/CodeGenCXX/anonymous-union-member-initializer.cpp
    cfe/trunk/test/CodeGenCXX/arm64-constructor-return.cpp

Modified: cfe/trunk/test/CodeGen/x86_32-xsave.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/x86_32-xsave.c?rev=312710&r1=312709&r2=312710&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/x86_32-xsave.c (original)
+++ cfe/trunk/test/CodeGen/x86_32-xsave.c Thu Sep 7 02:54:03 2017
@@ -15,57 +15,57 @@ void test() {
   void* tmp_vp = 0;

 #ifdef TEST_XSAVE
-// XSAVE: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVE: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVE: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVE: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVE: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVE: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
 // XSAVE: call void @llvm.x86.xsave(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
   (void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);

-// XSAVE: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
-// XSAVE: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
-// XSAVE: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVE: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVE: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVE: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
 // XSAVE: call void @llvm.x86.xrstor(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
   (void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);
 #endif

 #ifdef TEST_XSAVEOPT
-// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEOPT: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVEOPT: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVEOPT: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEOPT: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEOPT: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
 // XSAVEOPT: call void @llvm.x86.xsaveopt(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
   (void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);
 #endif

 #ifdef TEST_XSAVEC
-// XSAVEC: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEC: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVEC: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVEC: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEC: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEC: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEC: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
 // XSAVEC: call void @llvm.x86.xsavec(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
   (void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);
 #endif

 #ifdef TEST_XSAVES
-// XSAVES: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVES: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVES: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVES: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVES: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVES: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
 // XSAVES: call void @llvm.x86.xsaves(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
   (void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);

-// XSAVES: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
-// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
-// XSAVES: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
-// XSAVES: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVES: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVES: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVES: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
 // XSAVES: call void @llvm.x86.xrstors(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
   (void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);
 #endif

Modified: cfe/trunk/test/CodeGen/x86_64-xsave.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/x86_64-xsave.c?rev=312710&r1=312709&r2=312710&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/x86_64-xsave.c (original)
+++ cfe/trunk/test/CodeGen/x86_64-xsave.c Thu Sep 7 02:54:03 2017
@@ -15,105 +15,105 @@ void test() {
   void* tmp_vp = 0;

 #ifdef TEST_XSAVE
-// XSAVE: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVE: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVE: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVE: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVE: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVE: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
 // XSAVE: call void @llvm.x86.xsave(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
   (void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);

-// XSAVE: [[tmp_vp_2:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVE: [[tmp_ULLi_2:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
-// XSAVE: [[high32_2:%[0-9a-zA-z]+]] = trunc i64 [[high64_2]] to i32
-// XSAVE: [[low32_2:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVE: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVE: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVE: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
 // XSAVE: call void @llvm.x86.xsave64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
   (void)__builtin_ia32_xsave64(tmp_vp, tmp_ULLi);

-// XSAVE: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
-// XSAVE: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
-// XSAVE: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVE: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVE: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVE: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
 // XSAVE: call void @llvm.x86.xrstor(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
   (void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);

-// XSAVE: [[tmp_vp_4:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVE: [[tmp_ULLi_4:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVE: [[high64_4:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_4]], 32
-// XSAVE: [[high32_4:%[0-9a-zA-z]+]] = trunc i64 [[high64_4]] to i32
-// XSAVE: [[low32_4:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
+// XSAVE: [[tmp_vp_4:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_4:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_4:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_4]], 32
+// XSAVE: [[high32_4:%[0-9a-zA-Z]+]] = trunc i64 [[high64_4]] to i32
+// XSAVE: [[low32_4:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
 // XSAVE: call void @llvm.x86.xrstor64(i8* [[tmp_vp_4]], i32 [[high32_4]], i32 [[low32_4]])
   (void)__builtin_ia32_xrstor64(tmp_vp, tmp_ULLi);
 #endif

 #ifdef TEST_XSAVEOPT
-// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEOPT: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVEOPT: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVEOPT: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEOPT: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEOPT: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
 // XSAVEOPT: call void @llvm.x86.xsaveopt(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
   (void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);

-// XSAVEOPT: [[tmp_vp_2:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVEOPT: [[tmp_ULLi_2:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEOPT: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
-// XSAVEOPT: [[high32_2:%[0-9a-zA-z]+]] = trunc i64 [[high64_2]] to i32
-// XSAVEOPT: [[low32_2:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVEOPT: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEOPT: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVEOPT: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVEOPT: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
 // XSAVEOPT: call void @llvm.x86.xsaveopt64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
   (void)__builtin_ia32_xsaveopt64(tmp_vp, tmp_ULLi);
 #endif

 #ifdef TEST_XSAVEC
-// XSAVEC: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEC: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVEC: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVEC: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEC: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEC: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEC: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
 // XSAVEC: call void @llvm.x86.xsavec(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
   (void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);

-// XSAVEC: [[tmp_vp_2:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVEC: [[tmp_ULLi_2:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVEC: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
-// XSAVEC: [[high32_2:%[0-9a-zA-z]+]] = trunc i64 [[high64_2]] to i32
-// XSAVEC: [[low32_2:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVEC: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEC: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVEC: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVEC: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
 // XSAVEC: call void @llvm.x86.xsavec64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
   (void)__builtin_ia32_xsavec64(tmp_vp, tmp_ULLi);
 #endif

 #ifdef TEST_XSAVES
-// XSAVES: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
-// XSAVES: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
-// XSAVES: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVES: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVES: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVES: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
 // XSAVES: call void @llvm.x86.xsaves(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
   (void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);

-// XSAVES: [[tmp_vp_2:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVES: [[tmp_ULLi_2:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
-// XSAVES: [[high32_2:%[0-9a-zA-z]+]] = trunc i64 [[high64_2]] to i32
-// XSAVES: [[low32_2:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVES: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVES: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVES: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
 // XSAVES: call void @llvm.x86.xsaves64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
   (void)__builtin_ia32_xsaves64(tmp_vp, tmp_ULLi);

-// XSAVES: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
-// XSAVES: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
-// XSAVES: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVES: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVES: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVES: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
 // XSAVES: call void @llvm.x86.xrstors(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
   (void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);

-// XSAVES: [[tmp_vp_4:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
-// XSAVES: [[tmp_ULLi_4:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
-// XSAVES: [[high64_4:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_4]], 32
-// XSAVES: [[high32_4:%[0-9a-zA-z]+]] = trunc i64 [[high64_4]] to i32
-// XSAVES: [[low32_4:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
+// XSAVES: [[tmp_vp_4:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_4:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_4:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_4]], 32
+// XSAVES: [[high32_4:%[0-9a-zA-Z]+]] = trunc i64 [[high64_4]] to i32
+// XSAVES: [[low32_4:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
 // XSAVES: call void @llvm.x86.xrstors64(i8* [[tmp_vp_4]], i32 [[high32_4]], i32 [[low32_4]])
   (void)__builtin_ia32_xrstors64(tmp_vp, tmp_ULLi);
 #endif

Modified: cfe/trunk/test/CodeGenCXX/anonymous-union-member-initializer.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/anonymous-union-member-initializer.cpp?rev=312710&r1=312709&r2=312710&view=diff
==============================================================================
--- cfe/trunk/test/CodeGenCXX/anonymous-union-member-initializer.cpp (original)
+++ cfe/trunk/test/CodeGenCXX/anonymous-union-member-initializer.cpp Thu Sep 7 02:54:03 2017
@@ -80,37 +80,37 @@ namespace PR10512 {
   };

   // CHECK-LABEL: define void @_ZN7PR105121AC2Ev
-  // CHECK: [[THISADDR:%[a-zA-z0-9.]+]] = alloca [[A:%"struct[A-Za-z0-9:.]+"]]
-  // CHECK-NEXT: store [[A]]* [[THIS:%[a-zA-z0-9.]+]], [[A]]** [[THISADDR]]
-  // CHECK-NEXT: [[THIS1:%[a-zA-z0-9.]+]] = load [[A]]*, [[A]]** [[THISADDR]]
+  // CHECK: [[THISADDR:%[a-zA-Z0-9.]+]] = alloca [[A:%"struct[A-Za-z0-9:.]+"]]
+  // CHECK-NEXT: store [[A]]* [[THIS:%[a-zA-Z0-9.]+]], [[A]]** [[THISADDR]]
+  // CHECK-NEXT: [[THIS1:%[a-zA-Z0-9.]+]] = load [[A]]*, [[A]]** [[THISADDR]]
   // CHECK-NEXT: ret void
   A::A() {}

   // CHECK-LABEL: define void @_ZN7PR105121AC2Ei
-  // CHECK: [[THISADDR:%[a-zA-z0-9.]+]] = alloca [[A:%"struct[A-Za-z0-9:.]+"]]
-  // CHECK-NEXT: [[XADDR:%[a-zA-z0-9.]+]] = alloca i32
-  // CHECK-NEXT: store [[A]]* [[THIS:%[a-zA-z0-9.]+]], [[A]]** [[THISADDR]]
-  // CHECK-NEXT: store i32 [[X:%[a-zA-z0-9.]+]], i32* [[XADDR]]
-  // CHECK-NEXT: [[THIS1:%[a-zA-z0-9.]+]] = load [[A]]*, [[A]]** [[THISADDR]]
+  // CHECK: [[THISADDR:%[a-zA-Z0-9.]+]] = alloca [[A:%"struct[A-Za-z0-9:.]+"]]
+  // CHECK-NEXT: [[XADDR:%[a-zA-Z0-9.]+]] = alloca i32
+  // CHECK-NEXT: store [[A]]* [[THIS:%[a-zA-Z0-9.]+]], [[A]]** [[THISADDR]]
+  // CHECK-NEXT: store i32 [[X:%[a-zA-Z0-9.]+]], i32* [[XADDR]]
+  // CHECK-NEXT: [[THIS1:%[a-zA-Z0-9.]+]] = load [[A]]*, [[A]]** [[THISADDR]]
   // CHECK-NEXT: {{getelementptr inbounds.*i32 0, i32 0}}
   // CHECK-NEXT: {{getelementptr inbounds.*i32 0, i32 0}}
   // CHECK-NEXT: {{getelementptr inbounds.*i32 0, i32 0}}
-  // CHECK-NEXT: [[TMP:%[a-zA-z0-9.]+]] = load i32, i32* [[XADDR]]
+  // CHECK-NEXT: [[TMP:%[a-zA-Z0-9.]+]] = load i32, i32* [[XADDR]]
   // CHECK-NEXT: store i32 [[TMP]]
   // CHECK-NEXT: ret void
   A::A(int x) : x(x) { }

   // CHECK-LABEL: define void @_ZN7PR105121AC2El
-  // CHECK: [[THISADDR:%[a-zA-z0-9.]+]] = alloca [[A:%"struct[A-Za-z0-9:.]+"]]
-  // CHECK-NEXT: [[XADDR:%[a-zA-z0-9.]+]] = alloca i64
-  // CHECK-NEXT: store [[A]]* [[THIS:%[a-zA-z0-9.]+]], [[A]]** [[THISADDR]]
-  // CHECK-NEXT: store i64 [[X:%[a-zA-z0-9.]+]], i64* [[XADDR]]
-  // CHECK-NEXT: [[THIS1:%[a-zA-z0-9.]+]] = load [[A]]*, [[A]]** [[THISADDR]]
+  // CHECK: [[THISADDR:%[a-zA-Z0-9.]+]] = alloca [[A:%"struct[A-Za-z0-9:.]+"]]
+  // CHECK-NEXT: [[XADDR:%[a-zA-Z0-9.]+]] = alloca i64
+  // CHECK-NEXT: store [[A]]* [[THIS:%[a-zA-Z0-9.]+]], [[A]]** [[THISADDR]]
+  // CHECK-NEXT: store i64 [[X:%[a-zA-Z0-9.]+]], i64* [[XADDR]]
+  // CHECK-NEXT: [[THIS1:%[a-zA-Z0-9.]+]] = load [[A]]*, [[A]]** [[THISADDR]]
   // CHECK-NEXT: {{getelementptr inbounds.*i32 0, i32 0}}
   // CHECK-NEXT: {{getelementptr inbounds.*i32 0, i32 1}}
   // CHECK-NEXT: {{getelementptr inbounds.*i32 0, i32 0}}
-  // CHECK-NEXT: [[TMP:%[a-zA-z0-9.]+]] = load i64, i64* [[XADDR]]
-  // CHECK-NEXT: [[CONV:%[a-zA-z0-9.]+]] = trunc i64 [[TMP]] to i32
+  // CHECK-NEXT: [[TMP:%[a-zA-Z0-9.]+]] = load i64, i64* [[XADDR]]
+  // CHECK-NEXT: [[CONV:%[a-zA-Z0-9.]+]] = trunc i64 [[TMP]] to i32
   // CHECK-NEXT: store i32 [[CONV]]
   // CHECK-NEXT: ret void
   A::A(long y) : y(y) { }

Modified: cfe/trunk/test/CodeGenCXX/arm64-constructor-return.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/arm64-constructor-return.cpp?rev=312710&r1=312709&r2=312710&view=diff
==============================================================================
--- cfe/trunk/test/CodeGenCXX/arm64-constructor-return.cpp (original)
+++ cfe/trunk/test/CodeGenCXX/arm64-constructor-return.cpp Thu Sep 7 02:54:03 2017
@@ -13,7 +13,7 @@ S::S() {
 // CHECK: %struct.S* @_ZN1SC2Ev(%struct.S* returned %this)
 // CHECK: %struct.S* @_ZN1SC1Ev(%struct.S* returned %this)
-// CHECK: [[THISADDR:%[a-zA-z0-9.]+]] = alloca %struct.S*
+// CHECK: [[THISADDR:%[a-zA-Z0-9.]+]] = alloca %struct.S*
 // CHECK: store %struct.S* %this, %struct.S** [[THISADDR]]
 // CHECK: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THISADDR]]
 // CHECK: ret %struct.S* [[THIS1]]
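
For readers unfamiliar with the FileCheck idiom used throughout these tests:
[[name:regex]] captures the text matched by the regex into a variable, and a
later bare [[name]] must match exactly the same text, which is why a
too-permissive character class can capture garbage and make subsequent check
lines succeed or fail for the wrong reason. A hypothetical reduced test in
the same style (RUN line and names are illustrative, not from this commit):

// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s
unsigned long long g;
unsigned int f(void) {
  // [[tmp:%[0-9a-zA-Z]+]] captures the SSA name produced by the load; the
  // later bare [[tmp]] and [[high64]] must match those exact names again.
  // CHECK: [[tmp:%[0-9a-zA-Z]+]] = load i64
  // CHECK: [[high64:%[0-9a-zA-Z]+]] = lshr i64 [[tmp]], 32
  // CHECK: trunc i64 [[high64]] to i32
  return (unsigned int)(g >> 32);
}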