RKSimon created this revision.
RKSimon added reviewers: craig.topper, igorb, delena.
RKSimon added a subscriber: cfe-commits.
RKSimon set the repository for this revision to rL LLVM.
Both the (V)CVTDQ2PD (i32 to f64) and (V)CVTUDQ2PD (u32 to f64) conversion instructions are lossless and can be safely represented as generic __builtin_convertvector calls instead of x86 intrinsics, without affecting the final codegen. This patch removes the clang builtins and their uses in the headers; a future patch will deal with removing the llvm intrinsics. This is an extension of https://reviews.llvm.org/D20528, which dealt with the equivalent SSE/AVX cases. (A standalone sketch of the __builtin_convertvector pattern follows the file list below.)

Repository:
  rL LLVM

https://reviews.llvm.org/D26686

Files:
  include/clang/Basic/BuiltinsX86.def
  lib/Headers/avx512fintrin.h
  lib/Headers/avx512vlintrin.h
  test/CodeGen/avx512f-builtins.c
  test/CodeGen/avx512vl-builtins.c
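For context, here is a minimal standalone sketch of the generic conversion pattern the headers now use. It is illustrative only and not part of the patch; the typedefs and function names (v4si, v4su, v4df, cvt_i32_to_f64, cvt_u32_to_f64) are hypothetical:

  // Hypothetical example using clang's vector extensions.
  // Clang lowers the signed case to 'sitofp <4 x i32> ... to <4 x double>'
  // and the unsigned case to 'uitofp <4 x i32> ... to <4 x double>'.
  typedef int          v4si __attribute__((__vector_size__(16)));
  typedef unsigned int v4su __attribute__((__vector_size__(16)));
  typedef double       v4df __attribute__((__vector_size__(32)));

  static inline v4df cvt_i32_to_f64(v4si a) {
    return __builtin_convertvector(a, v4df);  // lossless i32 -> f64
  }

  static inline v4df cvt_u32_to_f64(v4su a) {
    return __builtin_convertvector(a, v4df);  // lossless u32 -> f64
  }

With AVX-512F/VL enabled, the backend still selects VCVTDQ2PD/VCVTUDQ2PD for these patterns, which is why dropping the target-specific builtins does not affect the final codegen.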
Index: test/CodeGen/avx512vl-builtins.c
===================================================================
--- test/CodeGen/avx512vl-builtins.c
+++ test/CodeGen/avx512vl-builtins.c
@@ -1737,23 +1737,29 @@
 }

 __m128d test_mm_mask_cvtepi32_pd(__m128d __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm_mask_cvtepi32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtdq2pd.128
-  return _mm_mask_cvtepi32_pd(__W,__U,__A);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
+  // CHECK: sitofp <2 x i32> %{{.*}} to <2 x double>
+  // CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
+  return _mm_mask_cvtepi32_pd(__W,__U,__A);
 }

 __m128d test_mm_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm_maskz_cvtepi32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtdq2pd.128
-  return _mm_maskz_cvtepi32_pd(__U,__A);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
+  // CHECK: sitofp <2 x i32> %{{.*}} to <2 x double>
+  // CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
+  return _mm_maskz_cvtepi32_pd(__U,__A);
 }

 __m256d test_mm256_mask_cvtepi32_pd(__m256d __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm256_mask_cvtepi32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtdq2pd.256
-  return _mm256_mask_cvtepi32_pd(__W,__U,__A);
+  // CHECK: sitofp <4 x i32> %{{.*}} to <4 x double>
+  // CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
+  return _mm256_mask_cvtepi32_pd(__W,__U,__A);
 }

 __m256d test_mm256_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm256_maskz_cvtepi32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtdq2pd.256
-  return _mm256_maskz_cvtepi32_pd(__U,__A);
+  // CHECK: sitofp <4 x i32> %{{.*}} to <4 x double>
+  // CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
+  return _mm256_maskz_cvtepi32_pd(__U,__A);
 }

 __m128 test_mm_mask_cvtepi32_ps(__m128 __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm_mask_cvtepi32_ps
@@ -2017,33 +2023,40 @@
 }

 __m128d test_mm_cvtepu32_pd(__m128i __A) {
   // CHECK-LABEL: @test_mm_cvtepu32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.128
-  return _mm_cvtepu32_pd(__A);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
+  // CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
+  return _mm_cvtepu32_pd(__A);
 }

 __m128d test_mm_mask_cvtepu32_pd(__m128d __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm_mask_cvtepu32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.128
-  return _mm_mask_cvtepu32_pd(__W,__U,__A);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
+  // CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
+  // CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
+  return _mm_mask_cvtepu32_pd(__W,__U,__A);
 }

 __m128d test_mm_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm_maskz_cvtepu32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.128
-  return _mm_maskz_cvtepu32_pd(__U,__A);
+  // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i32> <i32 0, i32 1>
+  // CHECK: uitofp <2 x i32> %{{.*}} to <2 x double>
+  // CHECK: select <2 x i1> {{.*}}, <2 x double> {{.*}}, <2 x double> {{.*}}
+  return _mm_maskz_cvtepu32_pd(__U,__A);
 }

 __m256d test_mm256_cvtepu32_pd(__m128i __A) {
   // CHECK-LABEL: @test_mm256_cvtepu32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.256
-  return _mm256_cvtepu32_pd(__A);
+  // CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
+  return _mm256_cvtepu32_pd(__A);
 }

 __m256d test_mm256_mask_cvtepu32_pd(__m256d __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm256_mask_cvtepu32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.256
-  return _mm256_mask_cvtepu32_pd(__W,__U,__A);
+  // CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
+  // CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
+  return _mm256_mask_cvtepu32_pd(__W,__U,__A);
 }

 __m256d test_mm256_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm256_maskz_cvtepu32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.256
-  return _mm256_maskz_cvtepu32_pd(__U,__A);
+  // CHECK: uitofp <4 x i32> %{{.*}} to <4 x double>
+  // CHECK: select <4 x i1> {{.*}}, <4 x double> {{.*}}, <4 x double> {{.*}}
+  return _mm256_maskz_cvtepu32_pd(__U,__A);
 }

 __m128 test_mm_cvtepu32_ps(__m128i __A) {
   // CHECK-LABEL: @test_mm_cvtepu32_ps
Index: test/CodeGen/avx512f-builtins.c
===================================================================
--- test/CodeGen/avx512f-builtins.c
+++ test/CodeGen/avx512f-builtins.c
@@ -6951,31 +6951,34 @@
 __m512d test_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
 {
-  // CHECK-LABEL: @test_mm512_mask_cvtepi32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtdq2pd.512
+  // CHECK-LABEL: @test_mm512_mask_cvtepi32_pd
+  // CHECK: sitofp <8 x i32> %{{.*}} to <8 x double>
+  // CHECK: select <8 x i1> {{.*}}, <8 x double> {{.*}}, <8 x double> {{.*}}
   return _mm512_mask_cvtepi32_pd (__W,__U,__A);
 }

 __m512d test_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
 {
-  // CHECK-LABEL: @test_mm512_maskz_cvtepi32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtdq2pd.512
+  // CHECK-LABEL: @test_mm512_maskz_cvtepi32_pd
+  // CHECK: sitofp <8 x i32> %{{.*}} to <8 x double>
+  // CHECK: select <8 x i1> {{.*}}, <8 x double> {{.*}}, <8 x double> {{.*}}
   return _mm512_maskz_cvtepi32_pd (__U,__A);
 }

 __m512d test_mm512_cvtepi32lo_pd (__m512i __A)
 {
   // CHECK-LABEL: @test_mm512_cvtepi32lo_pd
   // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  // CHECK: @llvm.x86.avx512.mask.cvtdq2pd.512
+  // CHECK: sitofp <8 x i32> %{{.*}} to <8 x double>
   return _mm512_cvtepi32lo_pd (__A);
 }

 __m512d test_mm512_mask_cvtepi32lo_pd (__m512d __W, __mmask8 __U, __m512i __A)
 {
   // CHECK-LABEL: @test_mm512_mask_cvtepi32lo_pd
   // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  // CHECK: @llvm.x86.avx512.mask.cvtdq2pd.512
+  // CHECK: sitofp <8 x i32> %{{.*}} to <8 x double>
+  // CHECK: select <8 x i1> {{.*}}, <8 x double> {{.*}}, <8 x double> {{.*}}
   return _mm512_mask_cvtepi32lo_pd (__W, __U, __A);
 }
@@ -7000,33 +7003,43 @@
   return _mm512_maskz_cvtepi32_ps (__U,__A);
 }

+__m512d test_mm512_cvtepu32_pd(__m256i __A)
+{
+  // CHECK-LABEL: @test_mm512_cvtepu32_pd
+  // CHECK: uitofp <8 x i32> %{{.*}} to <8 x double>
+  return _mm512_cvtepu32_pd(__A);
+}
+
 __m512d test_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
 {
-  // CHECK-LABEL: @test_mm512_mask_cvtepu32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.512
+  // CHECK-LABEL: @test_mm512_mask_cvtepu32_pd
+  // CHECK: uitofp <8 x i32> %{{.*}} to <8 x double>
+  // CHECK: select <8 x i1> {{.*}}, <8 x double> {{.*}}, <8 x double> {{.*}}
   return _mm512_mask_cvtepu32_pd (__W,__U,__A);
 }

 __m512d test_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
 {
-  // CHECK-LABEL: @test_mm512_maskz_cvtepu32_pd
-  // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.512
+  // CHECK-LABEL: @test_mm512_maskz_cvtepu32_pd
+  // CHECK: uitofp <8 x i32> %{{.*}} to <8 x double>
+  // CHECK: select <8 x i1> {{.*}}, <8 x double> {{.*}}, <8 x double> {{.*}}
   return _mm512_maskz_cvtepu32_pd (__U,__A);
 }

 __m512d test_mm512_cvtepu32lo_pd (__m512i __A)
 {
   // CHECK-LABEL: @test_mm512_cvtepu32lo_pd
   // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.512
+  // CHECK: uitofp <8 x i32> %{{.*}} to <8 x double>
   return _mm512_cvtepu32lo_pd (__A);
 }

 __m512d test_mm512_mask_cvtepu32lo_pd (__m512d __W, __mmask8 __U, __m512i __A)
 {
   // CHECK-LABEL: @test_mm512_mask_cvtepu32lo_pd
   // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  // CHECK: @llvm.x86.avx512.mask.cvtudq2pd.512
+  // CHECK: uitofp <8 x i32> %{{.*}} to <8 x double>
+  // CHECK: select <8 x i1> {{.*}}, <8 x double> {{.*}}, <8 x double> {{.*}}
   return _mm512_mask_cvtepu32lo_pd (__W, __U, __A);
 }
Index: lib/Headers/avx512vlintrin.h
===================================================================
--- lib/Headers/avx512vlintrin.h
+++ lib/Headers/avx512vlintrin.h
@@ -2134,32 +2134,30 @@
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A)
 {
-  return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
-                                                    (__v2df) __W,
-                                                    (__mmask8) __U);
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
+                                              (__v2df)_mm_cvtepi32_pd(__A),
+                                              (__v2df)__W);
 }

 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A)
 {
-  return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
-                                                    (__v2df)
-                                                    _mm_setzero_pd (),
-                                                    (__mmask8) __U);
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
+                                              (__v2df)_mm_cvtepi32_pd(__A),
+                                              (__v2df)_mm_setzero_pd());
 }

 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A)
 {
-  return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
-                                                    (__v4df) __W,
-                                                    (__mmask8) __U);
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
+                                              (__v4df)_mm256_cvtepi32_pd(__A),
+                                              (__v4df)__W);
 }

 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A)
 {
-  return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
-                                                    (__v4df)
-                                                    _mm256_setzero_pd (),
-                                                    (__mmask8) __U);
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
+                                              (__v4df)_mm256_cvtepi32_pd(__A),
+                                              (__v4df)_mm256_setzero_pd());
 }

 static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -2558,48 +2556,41 @@
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_cvtepu32_pd (__m128i __A)
 {
-  return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
-                                                     (__v2df)
-                                                     _mm_setzero_pd (),
-                                                     (__mmask8) -1);
+  return (__m128d) __builtin_convertvector(
+      __builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df);
 }

 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A)
 {
-  return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
-                                                     (__v2df) __W,
-                                                     (__mmask8) __U);
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
+                                              (__v2df)_mm_cvtepu32_pd(__A),
+                                              (__v2df)__W);
 }

 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A)
 {
-  return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
-                                                     (__v2df)
-                                                     _mm_setzero_pd (),
-                                                     (__mmask8) __U);
+  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
+                                              (__v2df)_mm_cvtepu32_pd(__A),
+                                              (__v2df)_mm_setzero_pd());
 }

 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_cvtepu32_pd (__m128i __A)
 {
-  return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
-                                                     (__v4df)
-                                                     _mm256_setzero_pd (),
-                                                     (__mmask8) -1);
+  return (__m256d)__builtin_convertvector((__v4su)__A, __v4df);
 }

 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A)
 {
-  return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
-                                                     (__v4df) __W,
-                                                     (__mmask8) __U);
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
+                                              (__v4df)_mm256_cvtepu32_pd(__A),
+                                              (__v4df)__W);
 }

 static __inline__ __m256d __DEFAULT_FN_ATTRS
 _mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A)
 {
-  return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
-                                                     (__v4df)
-                                                     _mm256_setzero_pd (),
-                                                     (__mmask8) __U);
+  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
+                                              (__v4df)_mm256_cvtepu32_pd(__A),
+                                              (__v4df)_mm256_setzero_pd());
 }

 static __inline__ __m128 __DEFAULT_FN_ATTRS
Index: lib/Headers/avx512fintrin.h
===================================================================
--- lib/Headers/avx512fintrin.h
+++ lib/Headers/avx512fintrin.h
@@ -3740,26 +3740,23 @@
 static __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_cvtepi32_pd(__m256i __A)
 {
-  return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
-                                                    (__v8df)
-                                                    _mm512_setzero_pd (),
-                                                    (__mmask8) -1);
+  return (__m512d)__builtin_convertvector((__v8si)__A, __v8df);
 }

 static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
 {
-  return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
-                                                    (__v8df) __W,
-                                                    (__mmask8) __U);
+  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+                                              (__v8df)_mm512_cvtepi32_pd(__A),
+                                              (__v8df)__W);
 }

 static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
 {
-  return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
-                                                    (__v8df) _mm512_setzero_pd (),
-                                                    (__mmask8) __U);
+  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+                                              (__v8df)_mm512_cvtepi32_pd(__A),
+                                              (__v8df)_mm512_setzero_pd());
 }

 static __inline__ __m512d __DEFAULT_FN_ATTRS
@@ -3804,26 +3801,23 @@
 static __inline __m512d __DEFAULT_FN_ATTRS
 _mm512_cvtepu32_pd(__m256i __A)
 {
-  return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
-                                                     (__v8df)
-                                                     _mm512_setzero_pd (),
-                                                     (__mmask8) -1);
+  return (__m512d)__builtin_convertvector((__v8su)__A, __v8df);
 }

 static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
 {
-  return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
-                                                     (__v8df) __W,
-                                                     (__mmask8) __U);
+  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+                                              (__v8df)_mm512_cvtepu32_pd(__A),
+                                              (__v8df)__W);
 }

 static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
 {
-  return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
-                                                     (__v8df) _mm512_setzero_pd (),
-                                                     (__mmask8) __U);
+  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+                                              (__v8df)_mm512_cvtepu32_pd(__A),
+                                              (__v8df)_mm512_setzero_pd());
 }

 static __inline__ __m512d __DEFAULT_FN_ATTRS
Index: include/clang/Basic/BuiltinsX86.def
===================================================================
--- include/clang/Basic/BuiltinsX86.def
+++ include/clang/Basic/BuiltinsX86.def
@@ -961,8 +961,6 @@
 TARGET_BUILTIN(__builtin_ia32_maxpd512_mask, "V8dV8dV8dV8dUcIi", "", "avx512f")
 TARGET_BUILTIN(__builtin_ia32_cvtdq2ps512_mask, "V16fV16iV16fUsIi", "", "avx512f")
 TARGET_BUILTIN(__builtin_ia32_cvtudq2ps512_mask, "V16fV16iV16fUsIi", "", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvtdq2pd512_mask, "V8dV8iV8dUc", "", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvtudq2pd512_mask, "V8dV8iV8dUc", "", "avx512f")
 TARGET_BUILTIN(__builtin_ia32_cvtpd2ps512_mask, "V8fV8dV8fUcIi", "", "avx512f")
 TARGET_BUILTIN(__builtin_ia32_vcvtps2ph512_mask, "V16sV16fIiV16sUs", "", "avx512f")
 TARGET_BUILTIN(__builtin_ia32_vcvtph2ps512_mask, "V16fV16sV16fUsIi", "", "avx512f")
@@ -1165,8 +1163,6 @@
 TARGET_BUILTIN(__builtin_ia32_compressstoresf256_mask, "vV8f*V8fUc", "", "avx512vl")
 TARGET_BUILTIN(__builtin_ia32_compressstoresi128_mask, "vV4i*V4iUc", "", "avx512vl")
 TARGET_BUILTIN(__builtin_ia32_compressstoresi256_mask, "vV8i*V8iUc", "", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_cvtdq2pd128_mask, "V2dV4iV2dUc", "", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_cvtdq2pd256_mask, "V4dV4iV4dUc", "", "avx512vl")
 TARGET_BUILTIN(__builtin_ia32_cvtdq2ps128_mask, "V4fV4iV4fUc", "", "avx512vl")
 TARGET_BUILTIN(__builtin_ia32_cvtdq2ps256_mask, "V8fV8iV8fUc", "", "avx512vl")
 TARGET_BUILTIN(__builtin_ia32_cvtpd2dq128_mask, "V4iV2dV4iUc", "", "avx512vl")
@@ -1189,8 +1185,6 @@
 TARGET_BUILTIN(__builtin_ia32_cvttps2dq256_mask, "V8iV8fV8iUc", "", "avx512vl")
 TARGET_BUILTIN(__builtin_ia32_cvttps2udq128_mask, "V4iV4fV4iUc", "", "avx512vl")
 TARGET_BUILTIN(__builtin_ia32_cvttps2udq256_mask, "V8iV8fV8iUc", "", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_cvtudq2pd128_mask, "V2dV4iV2dUc", "", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_cvtudq2pd256_mask, "V4dV4iV4dUc", "", "avx512vl")
 TARGET_BUILTIN(__builtin_ia32_cvtudq2ps128_mask, "V4fV4iV4fUc", "", "avx512vl")
 TARGET_BUILTIN(__builtin_ia32_cvtudq2ps256_mask, "V8fV8iV8fUc", "", "avx512vl")
 TARGET_BUILTIN(__builtin_ia32_expanddf128_mask, "V2dV2dV2dUc", "", "avx512vl")