Hi!

A few other macros seem to suffer from the same issue.  What I did was
join the backslash-continued lines, keep only the function-like #define
lines and collapse runs of whitespace:

cat gcc/config/i386/*intrin.h | sed -e ':x /\\$/ { N; s/\\\n//g ; bx }' \
  | grep '^[[:blank:]]*#[[:blank:]]*define[[:blank:]].*(' | sed 's/[ ]\+/ /g' \
  > /tmp/macros

and then searched the resulting file for the regexps
  )[a-zA-Z]
  ) [a-zA-Z]
  [a-zA-Z][-+*/%]
  [a-zA-Z] [-+*/%]
  [-+*/%][a-zA-Z]
  [-+*/%] [a-zA-Z]
i.e. spots where a macro operand sits next to a cast or an arithmetic
operator without parentheses around it.
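To show why such a hit is a real bug rather than a style nit, here is a
reduced sketch of the (int)(N * 8) pattern fixed in the alignr macros
below (BAD_IMM8/GOOD_IMM8 are invented names, not from the headers):

/* Operator precedence splits an unparenthesized macro operand.  */
#define BAD_IMM8(N)  ((int)(N * 8))	/* pre-patch shape */
#define GOOD_IMM8(N) ((int)((N) * 8))	/* post-patch shape */

int bad  = BAD_IMM8 (2 + 1);	/* (int)(2 + 1 * 8) == 10, silently wrong */
int good = GOOD_IMM8 (2 + 1);	/* (int)((2 + 1) * 8) == 24, as intended */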
Tested on x86_64-linux so far; ok for trunk if it passes full
bootstrap/regtest on x86_64-linux and i686-linux?

2020-04-29  Jakub Jelinek  <ja...@redhat.com>

	PR target/94832
	* config/i386/avx512bwintrin.h (_mm512_alignr_epi8,
	_mm512_mask_alignr_epi8, _mm512_maskz_alignr_epi8): Wrap macro
	operands used in casts into parens.
	* config/i386/avx512fintrin.h (_mm512_cvt_roundps_ph,
	_mm512_cvtps_ph, _mm512_mask_cvt_roundps_ph, _mm512_mask_cvtps_ph,
	_mm512_maskz_cvt_roundps_ph, _mm512_maskz_cvtps_ph,
	_mm512_mask_cmp_epi64_mask, _mm512_mask_cmp_epi32_mask,
	_mm512_mask_cmp_epu64_mask, _mm512_mask_cmp_epu32_mask,
	_mm512_mask_cmp_round_pd_mask, _mm512_mask_cmp_round_ps_mask,
	_mm512_mask_cmp_pd_mask, _mm512_mask_cmp_ps_mask): Likewise.
	* config/i386/avx512vlbwintrin.h (_mm256_mask_alignr_epi8,
	_mm256_maskz_alignr_epi8, _mm_mask_alignr_epi8,
	_mm_maskz_alignr_epi8, _mm256_mask_cmp_epu8_mask): Likewise.
	* config/i386/avx512vlintrin.h (_mm_mask_cvtps_ph,
	_mm_maskz_cvtps_ph, _mm256_mask_cvtps_ph, _mm256_maskz_cvtps_ph):
	Likewise.
	* config/i386/f16cintrin.h (_mm_cvtps_ph, _mm256_cvtps_ph):
	Likewise.
	* config/i386/shaintrin.h (_mm_sha1rnds4_epu32): Likewise.

--- gcc/config/i386/avx512vlintrin.h.jj	2020-04-29 11:16:27.671094124 +0200
+++ gcc/config/i386/avx512vlintrin.h	2020-04-29 11:52:30.746028151 +0200
@@ -13466,19 +13466,19 @@ _mm256_permutex_pd (__m256d __X, const i
      (__mmask8)(U)))
 
 #define _mm_mask_cvtps_ph(W, U, A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) A, (int) (I), \
+  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) (A), (int) (I), \
   (__v8hi)(__m128i) (W), (__mmask8) (U)))
 
 #define _mm_maskz_cvtps_ph(U, A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) A, (int) (I), \
+  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) (A), (int) (I), \
   (__v8hi)(__m128i) _mm_setzero_si128 (), (__mmask8) (U)))
 
 #define _mm256_mask_cvtps_ph(W, U, A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) A, (int) (I), \
+  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) (A), (int) (I), \
   (__v8hi)(__m128i) (W), (__mmask8) (U)))
 
 #define _mm256_maskz_cvtps_ph(U, A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) A, (int) (I), \
+  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) (A), (int) (I), \
   (__v8hi)(__m128i) _mm_setzero_si128 (), (__mmask8) (U)))
 
 #define _mm256_mask_srai_epi32(W, U, A, B) \
--- gcc/config/i386/avx512bwintrin.h.jj	2020-01-12 11:54:36.313414917 +0100
+++ gcc/config/i386/avx512bwintrin.h	2020-04-29 11:55:52.703026442 +0200
@@ -3128,16 +3128,16 @@ _mm512_bsrli_epi128 (__m512i __A, const
 #define _mm512_alignr_epi8(X, Y, N) \
   ((__m512i) __builtin_ia32_palignr512 ((__v8di)(__m512i)(X), \
					 (__v8di)(__m512i)(Y), \
-					 (int)(N * 8)))
+					 (int)((N) * 8)))
 
 #define _mm512_mask_alignr_epi8(W, U, X, Y, N) \
   ((__m512i) __builtin_ia32_palignr512_mask ((__v8di)(__m512i)(X), \
-	      (__v8di)(__m512i)(Y), (int)(N * 8), \
+	      (__v8di)(__m512i)(Y), (int)((N) * 8), \
	      (__v8di)(__m512i)(W), (__mmask64)(U)))
 
 #define _mm512_maskz_alignr_epi8(U, X, Y, N) \
   ((__m512i) __builtin_ia32_palignr512_mask ((__v8di)(__m512i)(X), \
-	      (__v8di)(__m512i)(Y), (int)(N * 8), \
+	      (__v8di)(__m512i)(Y), (int)((N) * 8), \
	      (__v8di)(__m512i) \
	      _mm512_setzero_si512 (), \
	      (__mmask64)(U)))
--- gcc/config/i386/avx512vlbwintrin.h.jj	2020-01-12 11:54:36.315414887 +0100
+++ gcc/config/i386/avx512vlbwintrin.h	2020-04-29 11:56:45.766239901 +0200
@@ -1787,7 +1787,7 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m1
 #else
 #define _mm256_mask_alignr_epi8(W, U, X, Y, N) \
   ((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X), \
-	      (__v4di)(__m256i)(Y), (int)(N * 8), \
+	      (__v4di)(__m256i)(Y), (int)((N) * 8), \
	      (__v4di)(__m256i)(X), (__mmask32)(U)))
 
 #define _mm256_mask_srli_epi16(W, U, A, B) \
@@ -1864,18 +1864,18 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m1
 
 #define _mm256_maskz_alignr_epi8(U, X, Y, N) \
   ((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X), \
-	      (__v4di)(__m256i)(Y), (int)(N * 8), \
+	      (__v4di)(__m256i)(Y), (int)((N) * 8), \
	      (__v4di)(__m256i)_mm256_setzero_si256 (), \
	      (__mmask32)(U)))
 
 #define _mm_mask_alignr_epi8(W, U, X, Y, N) \
   ((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X), \
-	      (__v2di)(__m128i)(Y), (int)(N * 8), \
+	      (__v2di)(__m128i)(Y), (int)((N) * 8), \
	      (__v2di)(__m128i)(X), (__mmask16)(U)))
 
 #define _mm_maskz_alignr_epi8(U, X, Y, N) \
   ((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X), \
-	      (__v2di)(__m128i)(Y), (int)(N * 8), \
+	      (__v2di)(__m128i)(Y), (int)((N) * 8), \
	      (__v2di)(__m128i)_mm_setzero_si128 (), \
	      (__mmask16)(U)))
 
@@ -2033,7 +2033,7 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m1
 #define _mm256_mask_cmp_epu8_mask(M, X, Y, P) \
   ((__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi)(__m256i)(X), \
					      (__v32qi)(__m256i)(Y), (int)(P),\
-					      (__mmask32)M))
+					      (__mmask32)(M)))
 #endif
 
 extern __inline __mmask32
--- gcc/config/i386/f16cintrin.h.jj	2020-01-12 11:54:36.319414827 +0100
+++ gcc/config/i386/f16cintrin.h	2020-04-29 11:53:44.278934179 +0200
@@ -84,10 +84,10 @@ _mm256_cvtps_ph (__m256 __A, const int _
     }))
 
 #define _mm_cvtps_ph(A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph ((__v4sf)(__m128) A, (int) (I)))
+  ((__m128i) __builtin_ia32_vcvtps2ph ((__v4sf)(__m128) (A), (int) (I)))
 
 #define _mm256_cvtps_ph(A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph256 ((__v8sf)(__m256) A, (int) (I)))
+  ((__m128i) __builtin_ia32_vcvtps2ph256 ((__v8sf)(__m256) (A), (int) (I)))
 
 #endif /* __OPTIMIZE */
 
 #ifdef __DISABLE_F16C__
--- gcc/config/i386/shaintrin.h.jj	2020-01-12 11:54:36.331414646 +0100
+++ gcc/config/i386/shaintrin.h	2020-04-29 11:50:27.934855249 +0200
@@ -64,8 +64,8 @@ _mm_sha1rnds4_epu32 (__m128i __A, __m128
 }
 #else
 #define _mm_sha1rnds4_epu32(A, B, I) \
-  ((__m128i) __builtin_ia32_sha1rnds4 ((__v4si)(__m128i)A, \
-				       (__v4si)(__m128i)B, (int)I))
+  ((__m128i) __builtin_ia32_sha1rnds4 ((__v4si)(__m128i)(A), \
+				       (__v4si)(__m128i)(B), (int)(I)))
 #endif
 
 extern __inline __m128i
--- gcc/config/i386/avx512fintrin.h.jj	2020-04-29 11:05:59.796380601 +0200
+++ gcc/config/i386/avx512fintrin.h	2020-04-29 11:51:37.925813976 +0200
@@ -8570,22 +8570,22 @@ _mm512_maskz_cvtps_ph (__mmask16 __W, __
   (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(A), (__v16sf)_mm512_setzero_ps(), U, B)
 
 #define _mm512_cvt_roundps_ph(A, I) \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
   (__v16hi)_mm256_undefined_si256 (), -1))
 
 #define _mm512_cvtps_ph(A, I) \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
   (__v16hi)_mm256_undefined_si256 (), -1))
 
 #define _mm512_mask_cvt_roundps_ph(U, W, A, I) \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
   (__v16hi)(__m256i)(U), (__mmask16) (W)))
 
 #define _mm512_mask_cvtps_ph(U, W, A, I) \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
   (__v16hi)(__m256i)(U), (__mmask16) (W)))
 
 #define _mm512_maskz_cvt_roundps_ph(W, A, I) \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
   (__v16hi)_mm256_setzero_si256 (), (__mmask16) (W)))
 
 #define _mm512_maskz_cvtps_ph(W, A, I) \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
   (__v16hi)_mm256_setzero_si256 (), (__mmask16) (W)))
 #endif
@@ -10081,32 +10081,32 @@ _mm_mask_cmp_round_ss_mask (__mmask8 __M
 #define _mm512_mask_cmp_epi64_mask(M, X, Y, P) \
   ((__mmask8) __builtin_ia32_cmpq512_mask ((__v8di)(__m512i)(X), \
					    (__v8di)(__m512i)(Y), (int)(P),\
-					    (__mmask8)M))
+					    (__mmask8)(M)))
 
 #define _mm512_mask_cmp_epi32_mask(M, X, Y, P) \
   ((__mmask16) __builtin_ia32_cmpd512_mask ((__v16si)(__m512i)(X), \
					     (__v16si)(__m512i)(Y), (int)(P), \
-					     (__mmask16)M))
+					     (__mmask16)(M)))
 
 #define _mm512_mask_cmp_epu64_mask(M, X, Y, P) \
   ((__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di)(__m512i)(X), \
					     (__v8di)(__m512i)(Y), (int)(P),\
-					     (__mmask8)M))
+					     (__mmask8)(M)))
 
 #define _mm512_mask_cmp_epu32_mask(M, X, Y, P) \
   ((__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si)(__m512i)(X), \
					      (__v16si)(__m512i)(Y), (int)(P), \
-					      (__mmask16)M))
+					      (__mmask16)(M)))
 
 #define _mm512_mask_cmp_round_pd_mask(M, X, Y, P, R) \
   ((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X), \
					     (__v8df)(__m512d)(Y), (int)(P),\
-					     (__mmask8)M, R))
+					     (__mmask8)(M), R))
 
 #define _mm512_mask_cmp_round_ps_mask(M, X, Y, P, R) \
   ((__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf)(__m512)(X), \
					      (__v16sf)(__m512)(Y), (int)(P),\
-					      (__mmask16)M, R))
+					      (__mmask16)(M), R))
 
 #define _mm_cmp_round_sd_mask(X, Y, P, R) \
   ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \
@@ -15498,12 +15498,12 @@ _mm_mask_cmp_ss_mask (__mmask8 __M, __m1
 #define _mm512_mask_cmp_pd_mask(M, X, Y, P) \
   ((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X), \
					     (__v8df)(__m512d)(Y), (int)(P),\
-					     (__mmask8)M, _MM_FROUND_CUR_DIRECTION))
+					     (__mmask8)(M), _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_cmp_ps_mask(M, X, Y, P) \
   ((__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf)(__m512)(X), \
					      (__v16sf)(__m512)(Y), (int)(P),\
-					      (__mmask16)M,_MM_FROUND_CUR_DIRECTION))
+					      (__mmask16)(M),_MM_FROUND_CUR_DIRECTION))
 
 #define _mm_cmp_sd_mask(X, Y, P) \
   ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \

	Jakub
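P.S. On why the cast-only fixes such as (__mmask8)M -> (__mmask8)(M)
matter: a cast binds tighter than any binary or conditional operator, so
without the parens it applies to just the first token of the argument.
A reduced sketch, with unsigned char standing in for __mmask8 and
invented macro names (illustration only, not part of the patch):

#define BAD_MASK(M)  ((unsigned char)M)		/* pre-patch shape */
#define GOOD_MASK(M) ((unsigned char)(M))	/* post-patch shape */

int r1 = BAD_MASK (1 ? 0x100 : 0);  /* ((unsigned char)1) ? 0x100 : 0 == 0x100 */
int r2 = GOOD_MASK (1 ? 0x100 : 0); /* (unsigned char)0x100 == 0, as intended */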