The intrinsic names for *[i|u]bs instructions in AVX10.2 are missing the required _ep[i|u]8 suffix: they currently carry incorrect _ep[i|u]16 or _ep[i|u]32 suffixes, even though these instructions produce saturated 8-bit integer results.
This patch aims to fix the issue. gcc/ChangeLog: * config/i386/avx10_2-512satcvtintrin.h: Change *i[u]bs's type suffix of intrin name. * config/i386/avx10_2satcvtintrin.h: Ditto. gcc/testsuite/ChangeLog: * gcc.target/i386/avx10_2-512-satcvt-1.c: Modify intrin name. * gcc.target/i386/avx10_2-512-vcvtbf162ibs-2.c: Ditto. * gcc.target/i386/avx10_2-512-vcvtbf162iubs-2.c: Ditto. * gcc.target/i386/avx10_2-512-vcvtph2ibs-2.c: Ditto. * gcc.target/i386/avx10_2-512-vcvtph2iubs-2.c: Ditto. * gcc.target/i386/avx10_2-512-vcvtps2ibs-2.c: Ditto. * gcc.target/i386/avx10_2-512-vcvtps2iubs-2.c: Ditto. * gcc.target/i386/avx10_2-512-vcvttbf162ibs-2.c: Ditto. * gcc.target/i386/avx10_2-512-vcvttbf162iubs-2.c: Ditto. * gcc.target/i386/avx10_2-512-vcvttph2ibs-2.c: Ditto. * gcc.target/i386/avx10_2-512-vcvttph2iubs-2.c: Ditto. * gcc.target/i386/avx10_2-512-vcvttps2ibs-2.c: Ditto. * gcc.target/i386/avx10_2-512-vcvttps2iubs-2.c: Ditto. * gcc.target/i386/avx10_2-satcvt-1.c: Ditto. * gcc.target/i386/sse-14.c: Ditto. * gcc.target/i386/sse-22.c: Ditto. 
--- gcc/config/i386/avx10_2-512satcvtintrin.h | 152 +++++------ gcc/config/i386/avx10_2satcvtintrin.h | 236 +++++++++--------- .../gcc.target/i386/avx10_2-512-satcvt-1.c | 72 +++--- .../i386/avx10_2-512-vcvtbf162ibs-2.c | 6 +- .../i386/avx10_2-512-vcvtbf162iubs-2.c | 6 +- .../i386/avx10_2-512-vcvtph2ibs-2.c | 12 +- .../i386/avx10_2-512-vcvtph2iubs-2.c | 12 +- .../i386/avx10_2-512-vcvtps2ibs-2.c | 12 +- .../i386/avx10_2-512-vcvtps2iubs-2.c | 12 +- .../i386/avx10_2-512-vcvttbf162ibs-2.c | 6 +- .../i386/avx10_2-512-vcvttbf162iubs-2.c | 6 +- .../i386/avx10_2-512-vcvttph2ibs-2.c | 12 +- .../i386/avx10_2-512-vcvttph2iubs-2.c | 12 +- .../i386/avx10_2-512-vcvttps2ibs-2.c | 12 +- .../i386/avx10_2-512-vcvttps2iubs-2.c | 12 +- .../gcc.target/i386/avx10_2-satcvt-1.c | 144 +++++------ gcc/testsuite/gcc.target/i386/sse-14.c | 96 +++---- gcc/testsuite/gcc.target/i386/sse-22.c | 96 +++---- 18 files changed, 458 insertions(+), 458 deletions(-) diff --git a/gcc/config/i386/avx10_2-512satcvtintrin.h b/gcc/config/i386/avx10_2-512satcvtintrin.h index 6e864a9a6f8..a08f98c92a0 100644 --- a/gcc/config/i386/avx10_2-512satcvtintrin.h +++ b/gcc/config/i386/avx10_2-512satcvtintrin.h @@ -36,7 +36,7 @@ extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_ipcvtbf16_epi16 (__m512bh __A) +_mm512_ipcvtbf16_epi8 (__m512bh __A) { return (__m512i) __builtin_ia32_cvtbf162ibs512_mask ((__v32bf) __A, @@ -47,7 +47,7 @@ _mm512_ipcvtbf16_epi16 (__m512bh __A) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_mask_ipcvtbf16_epi16 (__m512i __W, __mmask32 __U, __m512bh __A) +_mm512_mask_ipcvtbf16_epi8 (__m512i __W, __mmask32 __U, __m512bh __A) { return (__m512i) __builtin_ia32_cvtbf162ibs512_mask ((__v32bf) __A, (__v32hi) __W, @@ -56,7 +56,7 @@ _mm512_mask_ipcvtbf16_epi16 (__m512i __W, __mmask32 __U, __m512bh __A) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
-_mm512_maskz_ipcvtbf16_epi16 (__mmask32 __U, __m512bh __A) +_mm512_maskz_ipcvtbf16_epi8 (__mmask32 __U, __m512bh __A) { return (__m512i) __builtin_ia32_cvtbf162ibs512_mask ((__v32bf) __A, @@ -67,7 +67,7 @@ _mm512_maskz_ipcvtbf16_epi16 (__mmask32 __U, __m512bh __A) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_ipcvtbf16_epu16 (__m512bh __A) +_mm512_ipcvtbf16_epu8 (__m512bh __A) { return (__m512i) __builtin_ia32_cvtbf162iubs512_mask ((__v32bf) __A, @@ -78,7 +78,7 @@ _mm512_ipcvtbf16_epu16 (__m512bh __A) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_mask_ipcvtbf16_epu16 (__m512i __W, __mmask32 __U, __m512bh __A) +_mm512_mask_ipcvtbf16_epu8 (__m512i __W, __mmask32 __U, __m512bh __A) { return (__m512i) __builtin_ia32_cvtbf162iubs512_mask ((__v32bf) __A, (__v32hi) __W, @@ -87,7 +87,7 @@ _mm512_mask_ipcvtbf16_epu16 (__m512i __W, __mmask32 __U, __m512bh __A) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_maskz_ipcvtbf16_epu16 (__mmask32 __U, __m512bh __A) +_mm512_maskz_ipcvtbf16_epu8 (__mmask32 __U, __m512bh __A) { return (__m512i) __builtin_ia32_cvtbf162iubs512_mask ((__v32bf) __A, @@ -98,7 +98,7 @@ _mm512_maskz_ipcvtbf16_epu16 (__mmask32 __U, __m512bh __A) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_ipcvttbf16_epi16 (__m512bh __A) +_mm512_ipcvttbf16_epi8 (__m512bh __A) { return (__m512i) __builtin_ia32_cvttbf162ibs512_mask ((__v32bf) __A, @@ -109,7 +109,7 @@ _mm512_ipcvttbf16_epi16 (__m512bh __A) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_mask_ipcvttbf16_epi16 (__m512i __W, __mmask32 __U, __m512bh __A) +_mm512_mask_ipcvttbf16_epi8 (__m512i __W, __mmask32 __U, __m512bh __A) { return (__m512i) __builtin_ia32_cvttbf162ibs512_mask ((__v32bf) __A, (__v32hi) __W, @@ -118,7 +118,7 @@ 
_mm512_mask_ipcvttbf16_epi16 (__m512i __W, __mmask32 __U, __m512bh __A) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_maskz_ipcvttbf16_epi16 (__mmask32 __U, __m512bh __A) +_mm512_maskz_ipcvttbf16_epi8 (__mmask32 __U, __m512bh __A) { return (__m512i) __builtin_ia32_cvttbf162ibs512_mask ((__v32bf) __A, @@ -129,7 +129,7 @@ _mm512_maskz_ipcvttbf16_epi16 (__mmask32 __U, __m512bh __A) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_ipcvttbf16_epu16 (__m512bh __A) +_mm512_ipcvttbf16_epu8 (__m512bh __A) { return (__m512i) __builtin_ia32_cvttbf162iubs512_mask ((__v32bf) __A, @@ -139,7 +139,7 @@ _mm512_ipcvttbf16_epu16 (__m512bh __A) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_mask_ipcvttbf16_epu16 (__m512i __W, __mmask32 __U, __m512bh __A) +_mm512_mask_ipcvttbf16_epu8 (__m512i __W, __mmask32 __U, __m512bh __A) { return (__m512i) __builtin_ia32_cvttbf162iubs512_mask ((__v32bf) __A, (__v32hi) __W, @@ -148,7 +148,7 @@ _mm512_mask_ipcvttbf16_epu16 (__m512i __W, __mmask32 __U, __m512bh __A) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_maskz_ipcvttbf16_epu16 (__mmask32 __U, __m512bh __A) +_mm512_maskz_ipcvttbf16_epu8 (__mmask32 __U, __m512bh __A) { return (__m512i) __builtin_ia32_cvttbf162iubs512_mask ((__v32bf) __A, @@ -160,7 +160,7 @@ _mm512_maskz_ipcvttbf16_epu16 (__mmask32 __U, __m512bh __A) #ifdef __OPTIMIZE__ extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_ipcvt_roundph_epi16 (__m512h __A, const int __R) +_mm512_ipcvt_roundph_epi8 (__m512h __A, const int __R) { return (__m512i) __builtin_ia32_cvtph2ibs512_mask_round ((__v32hf) __A, @@ -172,8 +172,8 @@ _mm512_ipcvt_roundph_epi16 (__m512h __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
-_mm512_mask_ipcvt_roundph_epi16 (__m512i __W, __mmask32 __U, __m512h __A, - const int __R) +_mm512_mask_ipcvt_roundph_epi8 (__m512i __W, __mmask32 __U, __m512h __A, + const int __R) { return (__m512i) __builtin_ia32_cvtph2ibs512_mask_round ((__v32hf) __A, (__v32hi) __W, @@ -183,7 +183,7 @@ _mm512_mask_ipcvt_roundph_epi16 (__m512i __W, __mmask32 __U, __m512h __A, extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_maskz_ipcvt_roundph_epi16 (__mmask32 __U, __m512h __A, const int __R) +_mm512_maskz_ipcvt_roundph_epi8 (__mmask32 __U, __m512h __A, const int __R) { return (__m512i) __builtin_ia32_cvtph2ibs512_mask_round ((__v32hf) __A, @@ -195,7 +195,7 @@ _mm512_maskz_ipcvt_roundph_epi16 (__mmask32 __U, __m512h __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_ipcvt_roundph_epu16 (__m512h __A, const int __R) +_mm512_ipcvt_roundph_epu8 (__m512h __A, const int __R) { return (__m512i) __builtin_ia32_cvtph2iubs512_mask_round ((__v32hf) __A, @@ -207,8 +207,8 @@ _mm512_ipcvt_roundph_epu16 (__m512h __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_mask_ipcvt_roundph_epu16 (__m512i __W, __mmask32 __U, __m512h __A, - const int __R) +_mm512_mask_ipcvt_roundph_epu8 (__m512i __W, __mmask32 __U, __m512h __A, + const int __R) { return (__m512i) __builtin_ia32_cvtph2iubs512_mask_round ((__v32hf) __A, (__v32hi) __W, @@ -218,7 +218,7 @@ _mm512_mask_ipcvt_roundph_epu16 (__m512i __W, __mmask32 __U, __m512h __A, extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_maskz_ipcvt_roundph_epu16 (__mmask32 __U, __m512h __A, const int __R) +_mm512_maskz_ipcvt_roundph_epu8 (__mmask32 __U, __m512h __A, const int __R) { return (__m512i) __builtin_ia32_cvtph2iubs512_mask_round ((__v32hf) __A, @@ -230,7 +230,7 @@ _mm512_maskz_ipcvt_roundph_epu16 (__mmask32 __U, __m512h 
__A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_ipcvt_roundps_epi32 (__m512 __A, const int __R) +_mm512_ipcvt_roundps_epi8 (__m512 __A, const int __R) { return (__m512i) __builtin_ia32_cvtps2ibs512_mask_round ((__v16sf) __A, @@ -242,8 +242,8 @@ _mm512_ipcvt_roundps_epi32 (__m512 __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_mask_ipcvt_roundps_epi32 (__m512i __W, __mmask16 __U, __m512 __A, - const int __R) +_mm512_mask_ipcvt_roundps_epi8 (__m512i __W, __mmask16 __U, __m512 __A, + const int __R) { return (__m512i) __builtin_ia32_cvtps2ibs512_mask_round ((__v16sf) __A, (__v16si) __W, @@ -253,7 +253,7 @@ _mm512_mask_ipcvt_roundps_epi32 (__m512i __W, __mmask16 __U, __m512 __A, extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_maskz_ipcvt_roundps_epi32 (__mmask16 __U, __m512 __A, const int __R) +_mm512_maskz_ipcvt_roundps_epi8 (__mmask16 __U, __m512 __A, const int __R) { return (__m512i) __builtin_ia32_cvtps2ibs512_mask_round ((__v16sf) __A, @@ -265,7 +265,7 @@ _mm512_maskz_ipcvt_roundps_epi32 (__mmask16 __U, __m512 __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_ipcvt_roundps_epu32 (__m512 __A, const int __R) +_mm512_ipcvt_roundps_epu8 (__m512 __A, const int __R) { return (__m512i) __builtin_ia32_cvtps2iubs512_mask_round ((__v16sf) __A, @@ -277,8 +277,8 @@ _mm512_ipcvt_roundps_epu32 (__m512 __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_mask_ipcvt_roundps_epu32 (__m512i __W, __mmask16 __U, __m512 __A, - const int __R) +_mm512_mask_ipcvt_roundps_epu8 (__m512i __W, __mmask16 __U, __m512 __A, + const int __R) { return (__m512i) __builtin_ia32_cvtps2iubs512_mask_round ((__v16sf) __A, (__v16si) __W, @@ -288,7 +288,7 @@ 
_mm512_mask_ipcvt_roundps_epu32 (__m512i __W, __mmask16 __U, __m512 __A, extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_maskz_ipcvt_roundps_epu32 (__mmask16 __U, __m512 __A, const int __R) +_mm512_maskz_ipcvt_roundps_epu8 (__mmask16 __U, __m512 __A, const int __R) { return (__m512i) __builtin_ia32_cvtps2iubs512_mask_round ((__v16sf) __A, @@ -300,7 +300,7 @@ _mm512_maskz_ipcvt_roundps_epu32 (__mmask16 __U, __m512 __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_ipcvtt_roundph_epi16 (__m512h __A, const int __R) +_mm512_ipcvtt_roundph_epi8 (__m512h __A, const int __R) { return (__m512i) __builtin_ia32_cvttph2ibs512_mask_round ((__v32hf) __A, @@ -312,8 +312,8 @@ _mm512_ipcvtt_roundph_epi16 (__m512h __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_mask_ipcvtt_roundph_epi16 (__m512i __W, __mmask32 __U, __m512h __A, - const int __R) +_mm512_mask_ipcvtt_roundph_epi8 (__m512i __W, __mmask32 __U, __m512h __A, + const int __R) { return (__m512i) __builtin_ia32_cvttph2ibs512_mask_round ((__v32hf) __A, (__v32hi) __W, @@ -323,7 +323,7 @@ _mm512_mask_ipcvtt_roundph_epi16 (__m512i __W, __mmask32 __U, __m512h __A, extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_maskz_ipcvtt_roundph_epi16 (__mmask32 __U, __m512h __A, const int __R) +_mm512_maskz_ipcvtt_roundph_epi8 (__mmask32 __U, __m512h __A, const int __R) { return (__m512i) __builtin_ia32_cvttph2ibs512_mask_round ((__v32hf) __A, @@ -335,7 +335,7 @@ _mm512_maskz_ipcvtt_roundph_epi16 (__mmask32 __U, __m512h __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_ipcvtt_roundph_epu16 (__m512h __A, const int __R) +_mm512_ipcvtt_roundph_epu8 (__m512h __A, const int __R) { return (__m512i) __builtin_ia32_cvttph2iubs512_mask_round 
((__v32hf) __A, @@ -347,8 +347,8 @@ _mm512_ipcvtt_roundph_epu16 (__m512h __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_mask_ipcvtt_roundph_epu16 (__m512i __W, __mmask32 __U, __m512h __A, - const int __R) +_mm512_mask_ipcvtt_roundph_epu8 (__m512i __W, __mmask32 __U, __m512h __A, + const int __R) { return (__m512i) __builtin_ia32_cvttph2iubs512_mask_round ((__v32hf) __A, (__v32hi) __W, @@ -358,7 +358,7 @@ _mm512_mask_ipcvtt_roundph_epu16 (__m512i __W, __mmask32 __U, __m512h __A, extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_maskz_ipcvtt_roundph_epu16 (__mmask32 __U, __m512h __A, const int __R) +_mm512_maskz_ipcvtt_roundph_epu8 (__mmask32 __U, __m512h __A, const int __R) { return (__m512i) __builtin_ia32_cvttph2iubs512_mask_round ((__v32hf) __A, @@ -370,7 +370,7 @@ _mm512_maskz_ipcvtt_roundph_epu16 (__mmask32 __U, __m512h __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_ipcvtt_roundps_epi32 (__m512 __A, const int __R) +_mm512_ipcvtt_roundps_epi8 (__m512 __A, const int __R) { return (__m512i) __builtin_ia32_cvttps2ibs512_mask_round ((__v16sf) __A, @@ -382,8 +382,8 @@ _mm512_ipcvtt_roundps_epi32 (__m512 __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_mask_ipcvtt_roundps_epi32 (__m512i __W, __mmask16 __U, __m512 __A, - const int __R) +_mm512_mask_ipcvtt_roundps_epi8 (__m512i __W, __mmask16 __U, __m512 __A, + const int __R) { return (__m512i) __builtin_ia32_cvttps2ibs512_mask_round ((__v16sf) __A, (__v16si) __W, @@ -393,7 +393,7 @@ _mm512_mask_ipcvtt_roundps_epi32 (__m512i __W, __mmask16 __U, __m512 __A, extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_maskz_ipcvtt_roundps_epi32 (__mmask16 __U, __m512 __A, const int __R) +_mm512_maskz_ipcvtt_roundps_epi8 
(__mmask16 __U, __m512 __A, const int __R) { return (__m512i) __builtin_ia32_cvttps2ibs512_mask_round ((__v16sf) __A, @@ -405,7 +405,7 @@ _mm512_maskz_ipcvtt_roundps_epi32 (__mmask16 __U, __m512 __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_ipcvtt_roundps_epu32 (__m512 __A, const int __R) +_mm512_ipcvtt_roundps_epu8 (__m512 __A, const int __R) { return (__m512i) __builtin_ia32_cvttps2iubs512_mask_round ((__v16sf) __A, @@ -417,8 +417,8 @@ _mm512_ipcvtt_roundps_epu32 (__m512 __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_mask_ipcvtt_roundps_epu32 (__m512i __W, __mmask16 __U, __m512 __A, - const int __R) +_mm512_mask_ipcvtt_roundps_epu8 (__m512i __W, __mmask16 __U, __m512 __A, + const int __R) { return (__m512i) __builtin_ia32_cvttps2iubs512_mask_round ((__v16sf) __A, (__v16si) __W, @@ -428,7 +428,7 @@ _mm512_mask_ipcvtt_roundps_epu32 (__m512i __W, __mmask16 __U, __m512 __A, extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm512_maskz_ipcvtt_roundps_epu32 (__mmask16 __U, __m512 __A, const int __R) +_mm512_maskz_ipcvtt_roundps_epu8 (__mmask16 __U, __m512 __A, const int __R) { return (__m512i) __builtin_ia32_cvttps2iubs512_mask_round ((__v16sf) __A, @@ -453,7 +453,7 @@ _mm512_cvtts_roundpd_epi32 (__m512d __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm512_mask_cvtts_roundpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A, - const int __R) + const int __R) { return (__m256i) __builtin_ia32_cvttpd2dqs512_mask_round ((__v8df) __A, (__v8si) __W, @@ -488,7 +488,7 @@ _mm512_cvtts_roundpd_epi64 (__m512d __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm512_mask_cvtts_roundpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A, - const int __R) + const int __R) { 
return (__m512i) __builtin_ia32_cvttpd2qqs512_mask_round ((__v8df) __A, (__v8di) __W, @@ -523,7 +523,7 @@ _mm512_cvtts_roundpd_epu32 (__m512d __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm512_mask_cvtts_roundpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A, - const int __R) + const int __R) { return (__m256i) __builtin_ia32_cvttpd2udqs512_mask_round ((__v8df) __A, (__v8si) __W, @@ -558,7 +558,7 @@ _mm512_cvtts_roundpd_epu64 (__m512d __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm512_mask_cvtts_roundpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A, - const int __R) + const int __R) { return (__m512i) __builtin_ia32_cvttpd2uqqs512_mask_round ((__v8df) __A, (__v8di) __W, @@ -593,7 +593,7 @@ _mm512_cvtts_roundps_epi32 (__m512 __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm512_mask_cvtts_roundps_epi32 (__m512i __W, __mmask16 __U, __m512 __A, - const int __R) + const int __R) { return (__m512i) __builtin_ia32_cvttps2dqs512_mask_round ((__v16sf) __A, (__v16si) __W, @@ -628,7 +628,7 @@ _mm512_cvtts_roundps_epi64 (__m256 __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm512_mask_cvtts_roundps_epi64 (__m512i __W, __mmask8 __U, __m256 __A, - const int __R) + const int __R) { return (__m512i) __builtin_ia32_cvttps2qqs512_mask_round ((__v8sf) __A, (__v8di) __W, @@ -663,7 +663,7 @@ _mm512_cvtts_roundps_epu32 (__m512 __A, const int __R) extern __inline __m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm512_mask_cvtts_roundps_epu32 (__m512i __W, __mmask16 __U, __m512 __A, - const int __R) + const int __R) { return (__m512i) __builtin_ia32_cvttps2udqs512_mask_round ((__v16sf) __A, (__v16si) __W, @@ -698,7 +698,7 @@ _mm512_cvtts_roundps_epu64 (__m256 __A, const int __R) extern __inline 
__m512i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm512_mask_cvtts_roundps_epu64 (__m512i __W, __mmask8 __U, __m256 __A, - const int __R) + const int __R) { return (__m512i) __builtin_ia32_cvttps2uqqs512_mask_round ((__v8sf) __A, (__v8di) __W, @@ -718,7 +718,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) __R); } #else -#define _mm512_ipcvt_roundph_epi16(A, R) \ +#define _mm512_ipcvt_roundph_epi8(A, R) \ ((__m512i) \ __builtin_ia32_cvtph2ibs512_mask_round ((__v32hf) (A), \ (__v32hi) \ @@ -726,13 +726,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask32) (-1), \ (R))) -#define _mm512_mask_ipcvt_roundph_epi16(W, U, A, R) \ +#define _mm512_mask_ipcvt_roundph_epi8(W, U, A, R) \ ((__m512i) __builtin_ia32_cvtph2ibs512_mask_round ((__v32hf) (A), \ (__v32hi) (W), \ (__mmask32) (U), \ (R))) -#define _mm512_maskz_ipcvt_roundph_epi16(U, A, R) \ +#define _mm512_maskz_ipcvt_roundph_epi8(U, A, R) \ ((__m512i) \ __builtin_ia32_cvtph2ibs512_mask_round ((__v32hf) (A), \ (__v32hi) \ @@ -740,7 +740,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask32) (U), \ (R))) -#define _mm512_ipcvt_roundph_epu16(A, R) \ +#define _mm512_ipcvt_roundph_epu8(A, R) \ ((__m512i) \ __builtin_ia32_cvtph2iubs512_mask_round ((__v32hf) (A), \ (__v32hi) \ @@ -748,13 +748,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask32) (-1), \ (R))) -#define _mm512_mask_ipcvt_roundph_epu16(W, U, A, R) \ +#define _mm512_mask_ipcvt_roundph_epu8(W, U, A, R) \ ((__m512i) __builtin_ia32_cvtph2iubs512_mask_round ((__v32hf) (A), \ (__v32hi) (W), \ (__mmask32) (U), \ (R))) -#define _mm512_maskz_ipcvt_roundph_epu16(U, A, R) \ +#define _mm512_maskz_ipcvt_roundph_epu8(U, A, R) \ ((__m512i) \ __builtin_ia32_cvtph2iubs512_mask_round ((__v32hf) (A), \ (__v32hi) \ @@ -762,7 +762,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) 
(__mmask32) (U), \ (R))) -#define _mm512_ipcvt_roundps_epi32(A, R) \ +#define _mm512_ipcvt_roundps_epi8(A, R) \ ((__m512i) \ __builtin_ia32_cvtps2ibs512_mask_round ((__v16sf) (A), \ (__v16si) \ @@ -770,13 +770,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask16) (-1), \ (R))) -#define _mm512_mask_ipcvt_roundps_epi32(W, U, A, R) \ +#define _mm512_mask_ipcvt_roundps_epi8(W, U, A, R) \ ((__m512i) __builtin_ia32_cvtps2ibs512_mask_round ((__v16sf) (A), \ (__v16si) (W), \ (__mmask16) (U), \ (R))) -#define _mm512_maskz_ipcvt_roundps_epi32(U, A, R) \ +#define _mm512_maskz_ipcvt_roundps_epi8(U, A, R) \ ((__m512i) \ __builtin_ia32_cvtps2ibs512_mask_round ((__v16sf) (A), \ (__v16si) \ @@ -784,7 +784,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask16) (U), \ (R))) -#define _mm512_ipcvt_roundps_epu32(A, R) \ +#define _mm512_ipcvt_roundps_epu8(A, R) \ ((__m512i) \ __builtin_ia32_cvtps2iubs512_mask_round ((__v16sf) (A), \ (__v16si) \ @@ -792,13 +792,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask16) (-1), \ (R))) -#define _mm512_mask_ipcvt_roundps_epu32(W, U, A, R) \ +#define _mm512_mask_ipcvt_roundps_epu8(W, U, A, R) \ ((__m512i) __builtin_ia32_cvtps2iubs512_mask_round ((__v16sf) (A), \ (__v16si) (W), \ (__mmask16) (U), \ (R))) -#define _mm512_maskz_ipcvt_roundps_epu32(U, A, R) \ +#define _mm512_maskz_ipcvt_roundps_epu8(U, A, R) \ ((__m512i) \ __builtin_ia32_cvtps2iubs512_mask_round ((__v16sf) (A), \ (__v16si) \ @@ -806,7 +806,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask16) (U), \ (R))) -#define _mm512_ipcvtt_roundph_epi16(A, R) \ +#define _mm512_ipcvtt_roundph_epi8(A, R) \ ((__m512i) \ __builtin_ia32_cvttph2ibs512_mask_round ((__v32hf) (A), \ (__v32hi) \ @@ -814,13 +814,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask32) (-1), \ (R))) -#define 
_mm512_mask_ipcvtt_roundph_epi16(W, U, A, R) \ +#define _mm512_mask_ipcvtt_roundph_epi8(W, U, A, R) \ ((__m512i) __builtin_ia32_cvttph2ibs512_mask_round ((__v32hf) (A), \ (__v32hi) (W), \ (__mmask32) (U), \ (R))) -#define _mm512_maskz_ipcvtt_roundph_epi16(U, A, R) \ +#define _mm512_maskz_ipcvtt_roundph_epi8(U, A, R) \ ((__m512i) \ __builtin_ia32_cvttph2ibs512_mask_round ((__v32hf) (A), \ (__v32hi) \ @@ -828,7 +828,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask32) (U), \ (R))) -#define _mm512_ipcvtt_roundph_epu16(A, R) \ +#define _mm512_ipcvtt_roundph_epu8(A, R) \ ((__m512i) \ __builtin_ia32_cvttph2iubs512_mask_round ((__v32hf) (A), \ (__v32hi) \ @@ -836,13 +836,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask32) (-1), \ (R))) -#define _mm512_mask_ipcvtt_roundph_epu16(W, U, A, R) \ +#define _mm512_mask_ipcvtt_roundph_epu8(W, U, A, R) \ ((__m512i) __builtin_ia32_cvttph2iubs512_mask_round ((__v32hf) (A), \ (__v32hi) (W), \ (__mmask32) (U), \ (R))) -#define _mm512_maskz_ipcvtt_roundph_epu16(U, A, R) \ +#define _mm512_maskz_ipcvtt_roundph_epu8(U, A, R) \ ((__m512i) \ __builtin_ia32_cvttph2iubs512_mask_round ((__v32hf) (A), \ (__v32hi) \ @@ -850,7 +850,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask32) (U), \ (R))) -#define _mm512_ipcvtt_roundps_epi32(A, R) \ +#define _mm512_ipcvtt_roundps_epi8(A, R) \ ((__m512i) \ __builtin_ia32_cvttps2ibs512_mask_round ((__v16sf) (A), \ (__v16si) \ @@ -858,13 +858,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask16) (-1), \ (R))) -#define _mm512_mask_ipcvtt_roundps_epi32(W, U, A, R) \ +#define _mm512_mask_ipcvtt_roundps_epi8(W, U, A, R) \ ((__m512i) __builtin_ia32_cvttps2ibs512_mask_round ((__v16sf) (A), \ (__v16si) (W), \ (__mmask16) (U), \ (R))) -#define _mm512_maskz_ipcvtt_roundps_epi32(U, A, R) \ +#define _mm512_maskz_ipcvtt_roundps_epi8(U, A, R) \ ((__m512i) \ 
__builtin_ia32_cvttps2ibs512_mask_round ((__v16sf) (A), \ (__v16si) \ @@ -872,7 +872,7 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask16) (U), \ (R))) -#define _mm512_ipcvtt_roundps_epu32(A, R) \ +#define _mm512_ipcvtt_roundps_epu8(A, R) \ ((__m512i) \ __builtin_ia32_cvttps2iubs512_mask_round ((__v16sf) (A), \ (__v16si) \ @@ -880,13 +880,13 @@ _mm512_maskz_cvtts_roundps_epu64 (__mmask8 __U, __m256 __A, const int __R) (__mmask16) (-1), \ (R))) -#define _mm512_mask_ipcvtt_roundps_epu32(W, U, A, R) \ +#define _mm512_mask_ipcvtt_roundps_epu8(W, U, A, R) \ ((__m512i) __builtin_ia32_cvttps2iubs512_mask_round ((__v16sf) (A), \ (__v16si) (W), \ (__mmask16) (U), \ (R))) -#define _mm512_maskz_ipcvtt_roundps_epu32(U, A, R) \ +#define _mm512_maskz_ipcvtt_roundps_epu8(U, A, R) \ ((__m512i) \ __builtin_ia32_cvttps2iubs512_mask_round ((__v16sf) (A), \ (__v16si) \ diff --git a/gcc/config/i386/avx10_2satcvtintrin.h b/gcc/config/i386/avx10_2satcvtintrin.h index ece1304bcfa..c389f1a2728 100644 --- a/gcc/config/i386/avx10_2satcvtintrin.h +++ b/gcc/config/i386/avx10_2satcvtintrin.h @@ -36,7 +36,7 @@ extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_ipcvtbf16_epi16 (__m128bh __A) +_mm_ipcvtbf16_epi8 (__m128bh __A) { return (__m128i) __builtin_ia32_cvtbf162ibs128_mask ((__v8bf) __A, (__v8hi) @@ -46,7 +46,7 @@ _mm_ipcvtbf16_epi16 (__m128bh __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_mask_ipcvtbf16_epi16 (__m128i __W, __mmask8 __U, __m128bh __A) +_mm_mask_ipcvtbf16_epi8 (__m128i __W, __mmask8 __U, __m128bh __A) { return (__m128i) __builtin_ia32_cvtbf162ibs128_mask ((__v8bf) __A, (__v8hi) __W, @@ -55,7 +55,7 @@ _mm_mask_ipcvtbf16_epi16 (__m128i __W, __mmask8 __U, __m128bh __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_maskz_ipcvtbf16_epi16 (__mmask8 __U, __m128bh __A) +_mm_maskz_ipcvtbf16_epi8 
(__mmask8 __U, __m128bh __A) { return (__m128i) __builtin_ia32_cvtbf162ibs128_mask ((__v8bf) __A, (__v8hi) @@ -65,7 +65,7 @@ _mm_maskz_ipcvtbf16_epi16 (__mmask8 __U, __m128bh __A) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_ipcvtbf16_epi16 (__m256bh __A) +_mm256_ipcvtbf16_epi8 (__m256bh __A) { return (__m256i) __builtin_ia32_cvtbf162ibs256_mask ((__v16bf) __A, @@ -76,7 +76,7 @@ _mm256_ipcvtbf16_epi16 (__m256bh __A) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_ipcvtbf16_epi16 (__m256i __W, __mmask16 __U, __m256bh __A) +_mm256_mask_ipcvtbf16_epi8 (__m256i __W, __mmask16 __U, __m256bh __A) { return (__m256i) __builtin_ia32_cvtbf162ibs256_mask ((__v16bf) __A, (__v16hi) __W, @@ -85,7 +85,7 @@ _mm256_mask_ipcvtbf16_epi16 (__m256i __W, __mmask16 __U, __m256bh __A) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_ipcvtbf16_epi16 (__mmask16 __U, __m256bh __A) +_mm256_maskz_ipcvtbf16_epi8 (__mmask16 __U, __m256bh __A) { return (__m256i) __builtin_ia32_cvtbf162ibs256_mask ((__v16bf) __A, @@ -96,7 +96,7 @@ _mm256_maskz_ipcvtbf16_epi16 (__mmask16 __U, __m256bh __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_ipcvtbf16_epu16 (__m128bh __A) +_mm_ipcvtbf16_epu8 (__m128bh __A) { return (__m128i) __builtin_ia32_cvtbf162iubs128_mask ((__v8bf) __A, @@ -107,7 +107,7 @@ _mm_ipcvtbf16_epu16 (__m128bh __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_mask_ipcvtbf16_epu16 (__m128i __W, __mmask8 __U, __m128bh __A) +_mm_mask_ipcvtbf16_epu8 (__m128i __W, __mmask8 __U, __m128bh __A) { return (__m128i) __builtin_ia32_cvtbf162iubs128_mask ((__v8bf) __A, (__v8hi) __W, @@ -116,7 +116,7 @@ _mm_mask_ipcvtbf16_epu16 (__m128i __W, __mmask8 __U, __m128bh __A) extern __inline __m128i __attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) -_mm_maskz_ipcvtbf16_epu16 (__mmask8 __U, __m128bh __A) +_mm_maskz_ipcvtbf16_epu8 (__mmask8 __U, __m128bh __A) { return (__m128i) __builtin_ia32_cvtbf162iubs128_mask ((__v8bf) __A, @@ -127,7 +127,7 @@ _mm_maskz_ipcvtbf16_epu16 (__mmask8 __U, __m128bh __A) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_ipcvtbf16_epu16 (__m256bh __A) +_mm256_ipcvtbf16_epu8 (__m256bh __A) { return (__m256i) __builtin_ia32_cvtbf162iubs256_mask ((__v16bf) __A, @@ -138,7 +138,7 @@ _mm256_ipcvtbf16_epu16 (__m256bh __A) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_ipcvtbf16_epu16 (__m256i __W, __mmask16 __U, __m256bh __A) +_mm256_mask_ipcvtbf16_epu8 (__m256i __W, __mmask16 __U, __m256bh __A) { return (__m256i) __builtin_ia32_cvtbf162iubs256_mask ((__v16bf) __A, (__v16hi) __W, @@ -147,7 +147,7 @@ _mm256_mask_ipcvtbf16_epu16 (__m256i __W, __mmask16 __U, __m256bh __A) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_ipcvtbf16_epu16 (__mmask16 __U, __m256bh __A) +_mm256_maskz_ipcvtbf16_epu8 (__mmask16 __U, __m256bh __A) { return (__m256i) __builtin_ia32_cvtbf162iubs256_mask ((__v16bf) __A, @@ -158,7 +158,7 @@ _mm256_maskz_ipcvtbf16_epu16 (__mmask16 __U, __m256bh __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_ipcvtph_epi16 (__m128h __A) +_mm_ipcvtph_epi8 (__m128h __A) { return (__m128i) __builtin_ia32_cvtph2ibs128_mask ((__v8hf) __A, (__v8hi) @@ -168,7 +168,7 @@ _mm_ipcvtph_epi16 (__m128h __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_mask_ipcvtph_epi16 (__m128i __W, __mmask8 __U, __m128h __A) +_mm_mask_ipcvtph_epi8 (__m128i __W, __mmask8 __U, __m128h __A) { return (__m128i) __builtin_ia32_cvtph2ibs128_mask ((__v8hf) __A, (__v8hi) __W, @@ -177,7 +177,7 @@ _mm_mask_ipcvtph_epi16 
(__m128i __W, __mmask8 __U, __m128h __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_maskz_ipcvtph_epi16 (__mmask8 __U, __m128h __A) +_mm_maskz_ipcvtph_epi8 (__mmask8 __U, __m128h __A) { return (__m128i) __builtin_ia32_cvtph2ibs128_mask ((__v8hf) __A, (__v8hi) @@ -187,7 +187,7 @@ _mm_maskz_ipcvtph_epi16 (__mmask8 __U, __m128h __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_ipcvtph_epu16 (__m128h __A) +_mm_ipcvtph_epu8 (__m128h __A) { return (__m128i) __builtin_ia32_cvtph2iubs128_mask ((__v8hf) __A, (__v8hi) @@ -197,7 +197,7 @@ _mm_ipcvtph_epu16 (__m128h __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_mask_ipcvtph_epu16 (__m128i __W, __mmask8 __U, __m128h __A) +_mm_mask_ipcvtph_epu8 (__m128i __W, __mmask8 __U, __m128h __A) { return (__m128i) __builtin_ia32_cvtph2iubs128_mask ((__v8hf) __A, (__v8hi) __W, @@ -206,7 +206,7 @@ _mm_mask_ipcvtph_epu16 (__m128i __W, __mmask8 __U, __m128h __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_maskz_ipcvtph_epu16 (__mmask8 __U, __m128h __A) +_mm_maskz_ipcvtph_epu8 (__mmask8 __U, __m128h __A) { return (__m128i) __builtin_ia32_cvtph2iubs128_mask ((__v8hf) __A, (__v8hi) @@ -216,7 +216,7 @@ _mm_maskz_ipcvtph_epu16 (__mmask8 __U, __m128h __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_ipcvtps_epi32 (__m128 __A) +_mm_ipcvtps_epi8 (__m128 __A) { return (__m128i) __builtin_ia32_cvtps2ibs128_mask ((__v4sf) __A, (__v4si) @@ -226,7 +226,7 @@ _mm_ipcvtps_epi32 (__m128 __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_mask_ipcvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) +_mm_mask_ipcvtps_epi8 (__m128i __W, __mmask8 __U, __m128 __A) { return (__m128i) __builtin_ia32_cvtps2ibs128_mask ((__v4sf) __A, (__v4si) __W, @@ -235,7 
+235,7 @@ _mm_mask_ipcvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_maskz_ipcvtps_epi32 (__mmask8 __U, __m128 __A) +_mm_maskz_ipcvtps_epi8 (__mmask8 __U, __m128 __A) { return (__m128i) __builtin_ia32_cvtps2ibs128_mask ((__v4sf) __A, (__v4si) @@ -245,7 +245,7 @@ _mm_maskz_ipcvtps_epi32 (__mmask8 __U, __m128 __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_ipcvtps_epu32 (__m128 __A) +_mm_ipcvtps_epu8 (__m128 __A) { return (__m128i) __builtin_ia32_cvtps2iubs128_mask ((__v4sf) __A, (__v4si) @@ -255,7 +255,7 @@ _mm_ipcvtps_epu32 (__m128 __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_mask_ipcvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) +_mm_mask_ipcvtps_epu8 (__m128i __W, __mmask8 __U, __m128 __A) { return (__m128i) __builtin_ia32_cvtps2iubs128_mask ((__v4sf) __A, (__v4si) __W, @@ -264,7 +264,7 @@ _mm_mask_ipcvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_maskz_ipcvtps_epu32 (__mmask8 __U, __m128 __A) +_mm_maskz_ipcvtps_epu8 (__mmask8 __U, __m128 __A) { return (__m128i) __builtin_ia32_cvtps2iubs128_mask ((__v4sf) __A, (__v4si) @@ -274,7 +274,7 @@ _mm_maskz_ipcvtps_epu32 (__mmask8 __U, __m128 __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_ipcvttbf16_epi16 (__m128bh __A) +_mm_ipcvttbf16_epi8 (__m128bh __A) { return (__m128i) __builtin_ia32_cvttbf162ibs128_mask ((__v8bf) __A, @@ -285,7 +285,7 @@ _mm_ipcvttbf16_epi16 (__m128bh __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_mask_ipcvttbf16_epi16 (__m128i __W, __mmask8 __U, __m128bh __A) +_mm_mask_ipcvttbf16_epi8 (__m128i __W, __mmask8 __U, __m128bh __A) { return (__m128i) __builtin_ia32_cvttbf162ibs128_mask 
((__v8bf) __A, (__v8hi) __W, @@ -294,7 +294,7 @@ _mm_mask_ipcvttbf16_epi16 (__m128i __W, __mmask8 __U, __m128bh __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_maskz_ipcvttbf16_epi16 (__mmask8 __U, __m128bh __A) +_mm_maskz_ipcvttbf16_epi8 (__mmask8 __U, __m128bh __A) { return (__m128i) __builtin_ia32_cvttbf162ibs128_mask ((__v8bf) __A, (__v8hi) @@ -304,7 +304,7 @@ _mm_maskz_ipcvttbf16_epi16 (__mmask8 __U, __m128bh __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_ipcvttbf16_epu16 (__m128bh __A) +_mm_ipcvttbf16_epu8 (__m128bh __A) { return (__m128i) __builtin_ia32_cvttbf162iubs128_mask ((__v8bf) __A, @@ -315,7 +315,7 @@ _mm_ipcvttbf16_epu16 (__m128bh __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_mask_ipcvttbf16_epu16 (__m128i __W, __mmask8 __U, __m128bh __A) +_mm_mask_ipcvttbf16_epu8 (__m128i __W, __mmask8 __U, __m128bh __A) { return (__m128i) __builtin_ia32_cvttbf162iubs128_mask ((__v8bf) __A, (__v8hi) __W, @@ -324,7 +324,7 @@ _mm_mask_ipcvttbf16_epu16 (__m128i __W, __mmask8 __U, __m128bh __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_maskz_ipcvttbf16_epu16 (__mmask8 __U, __m128bh __A) +_mm_maskz_ipcvttbf16_epu8 (__mmask8 __U, __m128bh __A) { return (__m128i) __builtin_ia32_cvttbf162iubs128_mask ((__v8bf) __A, (__v8hi) @@ -334,7 +334,7 @@ _mm_maskz_ipcvttbf16_epu16 (__mmask8 __U, __m128bh __A) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_ipcvttbf16_epi16 (__m256bh __A) +_mm256_ipcvttbf16_epi8 (__m256bh __A) { return (__m256i) __builtin_ia32_cvttbf162ibs256_mask ((__v16bf) __A, @@ -344,7 +344,7 @@ _mm256_ipcvttbf16_epi16 (__m256bh __A) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_ipcvttbf16_epi16 (__m256i __W, __mmask16 __U, __m256bh __A) 
+_mm256_mask_ipcvttbf16_epi8 (__m256i __W, __mmask16 __U, __m256bh __A) { return (__m256i) __builtin_ia32_cvttbf162ibs256_mask ((__v16bf) __A, (__v16hi) __W, @@ -353,7 +353,7 @@ _mm256_mask_ipcvttbf16_epi16 (__m256i __W, __mmask16 __U, __m256bh __A) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_ipcvttbf16_epi16 (__mmask16 __U, __m256bh __A) +_mm256_maskz_ipcvttbf16_epi8 (__mmask16 __U, __m256bh __A) { return (__m256i) __builtin_ia32_cvttbf162ibs256_mask ((__v16bf) __A, @@ -363,7 +363,7 @@ _mm256_maskz_ipcvttbf16_epi16 (__mmask16 __U, __m256bh __A) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_ipcvttbf16_epu16 (__m256bh __A) +_mm256_ipcvttbf16_epu8 (__m256bh __A) { return (__m256i) __builtin_ia32_cvttbf162iubs256_mask ((__v16bf) __A, @@ -373,7 +373,7 @@ _mm256_ipcvttbf16_epu16 (__m256bh __A) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_ipcvttbf16_epu16 (__m256i __W, __mmask16 __U, __m256bh __A) +_mm256_mask_ipcvttbf16_epu8 (__m256i __W, __mmask16 __U, __m256bh __A) { return (__m256i) __builtin_ia32_cvttbf162iubs256_mask ((__v16bf) __A, (__v16hi) __W, @@ -382,7 +382,7 @@ _mm256_mask_ipcvttbf16_epu16 (__m256i __W, __mmask16 __U, __m256bh __A) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_ipcvttbf16_epu16 (__mmask16 __U, __m256bh __A) +_mm256_maskz_ipcvttbf16_epu8 (__mmask16 __U, __m256bh __A) { return (__m256i) __builtin_ia32_cvttbf162iubs256_mask ((__v16bf) __A, @@ -392,7 +392,7 @@ _mm256_maskz_ipcvttbf16_epu16 (__mmask16 __U, __m256bh __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_ipcvttph_epi16 (__m128h __A) +_mm_ipcvttph_epi8 (__m128h __A) { return (__m128i) __builtin_ia32_cvttph2ibs128_mask ((__v8hf) __A, (__v8hi) @@ -402,7 +402,7 @@ _mm_ipcvttph_epi16 (__m128h __A) extern 
__inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_mask_ipcvttph_epi16 (__m128i __W, __mmask8 __U, __m128h __A) +_mm_mask_ipcvttph_epi8 (__m128i __W, __mmask8 __U, __m128h __A) { return (__m128i) __builtin_ia32_cvttph2ibs128_mask ((__v8hf) __A, (__v8hi) __W, @@ -411,7 +411,7 @@ _mm_mask_ipcvttph_epi16 (__m128i __W, __mmask8 __U, __m128h __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_maskz_ipcvttph_epi16 (__mmask8 __U, __m128h __A) +_mm_maskz_ipcvttph_epi8 (__mmask8 __U, __m128h __A) { return (__m128i) __builtin_ia32_cvttph2ibs128_mask ((__v8hf) __A, (__v8hi) @@ -421,7 +421,7 @@ _mm_maskz_ipcvttph_epi16 (__mmask8 __U, __m128h __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_ipcvttph_epu16 (__m128h __A) +_mm_ipcvttph_epu8 (__m128h __A) { return (__m128i) __builtin_ia32_cvttph2iubs128_mask ((__v8hf) __A, (__v8hi) @@ -431,7 +431,7 @@ _mm_ipcvttph_epu16 (__m128h __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_mask_ipcvttph_epu16 (__m128i __W, __mmask8 __U, __m128h __A) +_mm_mask_ipcvttph_epu8 (__m128i __W, __mmask8 __U, __m128h __A) { return (__m128i) __builtin_ia32_cvttph2iubs128_mask ((__v8hf) __A, (__v8hi) __W, @@ -440,7 +440,7 @@ _mm_mask_ipcvttph_epu16 (__m128i __W, __mmask8 __U, __m128h __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_maskz_ipcvttph_epu16 (__mmask8 __U, __m128h __A) +_mm_maskz_ipcvttph_epu8 (__mmask8 __U, __m128h __A) { return (__m128i) __builtin_ia32_cvttph2iubs128_mask ((__v8hf) __A, (__v8hi) @@ -450,7 +450,7 @@ _mm_maskz_ipcvttph_epu16 (__mmask8 __U, __m128h __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_ipcvttps_epi32 (__m128 __A) +_mm_ipcvttps_epi8 (__m128 __A) { return (__m128i) __builtin_ia32_cvttps2ibs128_mask ((__v4sf) __A, (__v4si) @@ 
-460,7 +460,7 @@ _mm_ipcvttps_epi32 (__m128 __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_mask_ipcvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) +_mm_mask_ipcvttps_epi8 (__m128i __W, __mmask8 __U, __m128 __A) { return (__m128i) __builtin_ia32_cvttps2ibs128_mask ((__v4sf) __A, (__v4si) __W, @@ -469,7 +469,7 @@ _mm_mask_ipcvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_maskz_ipcvttps_epi32 (__mmask8 __U, __m128 __A) +_mm_maskz_ipcvttps_epi8 (__mmask8 __U, __m128 __A) { return (__m128i) __builtin_ia32_cvttps2ibs128_mask ((__v4sf) __A, (__v4si) @@ -479,7 +479,7 @@ _mm_maskz_ipcvttps_epi32 (__mmask8 __U, __m128 __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_ipcvttps_epu32 (__m128 __A) +_mm_ipcvttps_epu8 (__m128 __A) { return (__m128i) __builtin_ia32_cvttps2iubs128_mask ((__v4sf) __A, (__v4si) @@ -489,7 +489,7 @@ _mm_ipcvttps_epu32 (__m128 __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_mask_ipcvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) +_mm_mask_ipcvttps_epu8 (__m128i __W, __mmask8 __U, __m128 __A) { return (__m128i) __builtin_ia32_cvttps2iubs128_mask ((__v4sf) __A, (__v4si) __W, @@ -498,7 +498,7 @@ _mm_mask_ipcvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm_maskz_ipcvttps_epu32 (__mmask8 __U, __m128 __A) +_mm_maskz_ipcvttps_epu8 (__mmask8 __U, __m128 __A) { return (__m128i) __builtin_ia32_cvttps2iubs128_mask ((__v4sf) __A, (__v4si) @@ -741,7 +741,7 @@ _mm_maskz_cvttsps_epu64 (__mmask8 __U, __m128 __A) #ifdef __OPTIMIZE__ extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_ipcvt_roundph_epi16 (__m256h __A, const int __R) +_mm256_ipcvt_roundph_epi8 
(__m256h __A, const int __R) { return (__m256i) __builtin_ia32_cvtph2ibs256_mask_round ((__v16hf) __A, @@ -753,8 +753,8 @@ _mm256_ipcvt_roundph_epi16 (__m256h __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_ipcvt_roundph_epi16 (__m256i __W, __mmask16 __U, __m256h __A, - const int __R) +_mm256_mask_ipcvt_roundph_epi8 (__m256i __W, __mmask16 __U, __m256h __A, + const int __R) { return (__m256i) __builtin_ia32_cvtph2ibs256_mask_round ((__v16hf) __A, (__v16hi) __W, @@ -764,7 +764,7 @@ _mm256_mask_ipcvt_roundph_epi16 (__m256i __W, __mmask16 __U, __m256h __A, extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_ipcvt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R) +_mm256_maskz_ipcvt_roundph_epi8 (__mmask16 __U, __m256h __A, const int __R) { return (__m256i) __builtin_ia32_cvtph2ibs256_mask_round ((__v16hf) __A, @@ -776,7 +776,7 @@ _mm256_maskz_ipcvt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_ipcvt_roundph_epu16 (__m256h __A, const int __R) +_mm256_ipcvt_roundph_epu8 (__m256h __A, const int __R) { return (__m256i) __builtin_ia32_cvtph2iubs256_mask_round ((__v16hf) __A, @@ -788,8 +788,8 @@ _mm256_ipcvt_roundph_epu16 (__m256h __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_ipcvt_roundph_epu16 (__m256i __W, __mmask16 __U, __m256h __A, - const int __R) +_mm256_mask_ipcvt_roundph_epu8 (__m256i __W, __mmask16 __U, __m256h __A, + const int __R) { return (__m256i) __builtin_ia32_cvtph2iubs256_mask_round ((__v16hf) __A, (__v16hi) __W, @@ -799,7 +799,7 @@ _mm256_mask_ipcvt_roundph_epu16 (__m256i __W, __mmask16 __U, __m256h __A, extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_ipcvt_roundph_epu16 
(__mmask16 __U, __m256h __A, const int __R) +_mm256_maskz_ipcvt_roundph_epu8 (__mmask16 __U, __m256h __A, const int __R) { return (__m256i) __builtin_ia32_cvtph2iubs256_mask_round ((__v16hf) __A, @@ -811,7 +811,7 @@ _mm256_maskz_ipcvt_roundph_epu16 (__mmask16 __U, __m256h __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_ipcvt_roundps_epi32 (__m256 __A, const int __R) +_mm256_ipcvt_roundps_epi8 (__m256 __A, const int __R) { return (__m256i) __builtin_ia32_cvtps2ibs256_mask_round ((__v8sf) __A, @@ -823,8 +823,8 @@ _mm256_ipcvt_roundps_epi32 (__m256 __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_ipcvt_roundps_epi32 (__m256i __W, __mmask8 __U, __m256 __A, - const int __R) +_mm256_mask_ipcvt_roundps_epi8 (__m256i __W, __mmask8 __U, __m256 __A, + const int __R) { return (__m256i) __builtin_ia32_cvtps2ibs256_mask_round ((__v8sf) __A, (__v8si) __W, @@ -834,7 +834,7 @@ _mm256_mask_ipcvt_roundps_epi32 (__m256i __W, __mmask8 __U, __m256 __A, extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_ipcvt_roundps_epi32 (__mmask8 __U, __m256 __A, const int __R) +_mm256_maskz_ipcvt_roundps_epi8 (__mmask8 __U, __m256 __A, const int __R) { return (__m256i) __builtin_ia32_cvtps2ibs256_mask_round ((__v8sf) __A, @@ -846,7 +846,7 @@ _mm256_maskz_ipcvt_roundps_epi32 (__mmask8 __U, __m256 __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_ipcvt_roundps_epu32 (__m256 __A, const int __R) +_mm256_ipcvt_roundps_epu8 (__m256 __A, const int __R) { return (__m256i) __builtin_ia32_cvtps2iubs256_mask_round ((__v8sf) __A, @@ -858,8 +858,8 @@ _mm256_ipcvt_roundps_epu32 (__m256 __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_ipcvt_roundps_epu32 (__m256i __W, 
__mmask8 __U, __m256 __A, - const int __R) +_mm256_mask_ipcvt_roundps_epu8 (__m256i __W, __mmask8 __U, __m256 __A, + const int __R) { return (__m256i) __builtin_ia32_cvtps2iubs256_mask_round ((__v8sf) __A, (__v8si) __W, @@ -869,7 +869,7 @@ _mm256_mask_ipcvt_roundps_epu32 (__m256i __W, __mmask8 __U, __m256 __A, extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_ipcvt_roundps_epu32 (__mmask8 __U, __m256 __A, const int __R) +_mm256_maskz_ipcvt_roundps_epu8 (__mmask8 __U, __m256 __A, const int __R) { return (__m256i) __builtin_ia32_cvtps2iubs256_mask_round ((__v8sf) __A, @@ -881,7 +881,7 @@ _mm256_maskz_ipcvt_roundps_epu32 (__mmask8 __U, __m256 __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_ipcvtt_roundph_epi16 (__m256h __A, const int __R) +_mm256_ipcvtt_roundph_epi8 (__m256h __A, const int __R) { return (__m256i) __builtin_ia32_cvttph2ibs256_mask_round ((__v16hf) __A, @@ -893,8 +893,8 @@ _mm256_ipcvtt_roundph_epi16 (__m256h __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_ipcvtt_roundph_epi16 (__m256i __W, __mmask16 __U, __m256h __A, - const int __R) +_mm256_mask_ipcvtt_roundph_epi8 (__m256i __W, __mmask16 __U, __m256h __A, + const int __R) { return (__m256i) __builtin_ia32_cvttph2ibs256_mask_round ((__v16hf) __A, (__v16hi) __W, @@ -904,7 +904,7 @@ _mm256_mask_ipcvtt_roundph_epi16 (__m256i __W, __mmask16 __U, __m256h __A, extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_ipcvtt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R) +_mm256_maskz_ipcvtt_roundph_epi8 (__mmask16 __U, __m256h __A, const int __R) { return (__m256i) __builtin_ia32_cvttph2ibs256_mask_round ((__v16hf) __A, @@ -916,7 +916,7 @@ _mm256_maskz_ipcvtt_roundph_epi16 (__mmask16 __U, __m256h __A, const int __R) extern __inline __m256i 
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_ipcvtt_roundph_epu16 (__m256h __A, const int __R) +_mm256_ipcvtt_roundph_epu8 (__m256h __A, const int __R) { return (__m256i) __builtin_ia32_cvttph2iubs256_mask_round ((__v16hf) __A, @@ -928,8 +928,8 @@ _mm256_ipcvtt_roundph_epu16 (__m256h __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_ipcvtt_roundph_epu16 (__m256i __W, __mmask16 __U, __m256h __A, - const int __R) +_mm256_mask_ipcvtt_roundph_epu8 (__m256i __W, __mmask16 __U, __m256h __A, + const int __R) { return (__m256i) __builtin_ia32_cvttph2iubs256_mask_round ((__v16hf) __A, (__v16hi) __W, @@ -939,7 +939,7 @@ _mm256_mask_ipcvtt_roundph_epu16 (__m256i __W, __mmask16 __U, __m256h __A, extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_ipcvtt_roundph_epu16 (__mmask16 __U, __m256h __A, const int __R) +_mm256_maskz_ipcvtt_roundph_epu8 (__mmask16 __U, __m256h __A, const int __R) { return (__m256i) __builtin_ia32_cvttph2iubs256_mask_round ((__v16hf) __A, @@ -951,7 +951,7 @@ _mm256_maskz_ipcvtt_roundph_epu16 (__mmask16 __U, __m256h __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_ipcvtt_roundps_epi32 (__m256 __A, const int __R) +_mm256_ipcvtt_roundps_epi8 (__m256 __A, const int __R) { return (__m256i) __builtin_ia32_cvttps2ibs256_mask_round ((__v8sf) __A, @@ -963,8 +963,8 @@ _mm256_ipcvtt_roundps_epi32 (__m256 __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_ipcvtt_roundps_epi32 (__m256i __W, __mmask8 __U, __m256 __A, - const int __R) +_mm256_mask_ipcvtt_roundps_epi8 (__m256i __W, __mmask8 __U, __m256 __A, + const int __R) { return (__m256i) __builtin_ia32_cvttps2ibs256_mask_round ((__v8sf) __A, (__v8si) __W, @@ -974,7 +974,7 @@ _mm256_mask_ipcvtt_roundps_epi32 (__m256i 
__W, __mmask8 __U, __m256 __A, extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_ipcvtt_roundps_epi32 (__mmask8 __U, __m256 __A, const int __R) +_mm256_maskz_ipcvtt_roundps_epi8 (__mmask8 __U, __m256 __A, const int __R) { return (__m256i) __builtin_ia32_cvttps2ibs256_mask_round ((__v8sf) __A, @@ -986,7 +986,7 @@ _mm256_maskz_ipcvtt_roundps_epi32 (__mmask8 __U, __m256 __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_ipcvtt_roundps_epu32 (__m256 __A, const int __R) +_mm256_ipcvtt_roundps_epu8 (__m256 __A, const int __R) { return (__m256i) __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) __A, @@ -998,8 +998,8 @@ _mm256_ipcvtt_roundps_epu32 (__m256 __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_ipcvtt_roundps_epu32 (__m256i __W, __mmask8 __U, __m256 __A, - const int __R) +_mm256_mask_ipcvtt_roundps_epu8 (__m256i __W, __mmask8 __U, __m256 __A, + const int __R) { return (__m256i) __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) __A, (__v8si) __W, @@ -1009,7 +1009,7 @@ _mm256_mask_ipcvtt_roundps_epu32 (__m256i __W, __mmask8 __U, __m256 __A, extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_ipcvtt_roundps_epu32 (__mmask8 __U, __m256 __A, const int __R) +_mm256_maskz_ipcvtt_roundps_epu8 (__mmask8 __U, __m256 __A, const int __R) { return (__m256i) __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) __A, @@ -1034,7 +1034,7 @@ _mm256_cvtts_roundpd_epi32 (__m256d __A, const int __R) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_cvtts_roundpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A, - const int __R) + const int __R) { return (__m128i) __builtin_ia32_cvttpd2dqs256_mask_round ((__v4df) __A, (__v4si) __W, @@ -1069,7 +1069,7 @@ _mm256_cvtts_roundpd_epi64 
(__m256d __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_cvtts_roundpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A, - const int __R) + const int __R) { return (__m256i) __builtin_ia32_cvttpd2qqs256_mask_round ((__v4df) __A, (__v4di) __W, @@ -1104,7 +1104,7 @@ _mm256_cvtts_roundpd_epu32 (__m256d __A, const int __R) extern __inline __m128i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_cvtts_roundpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A, - const int __R) + const int __R) { return (__m128i) __builtin_ia32_cvttpd2udqs256_mask_round ((__v4df) __A, (__v4si) __W, @@ -1139,7 +1139,7 @@ _mm256_cvtts_roundpd_epu64 (__m256d __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_cvtts_roundpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A, - const int __R) + const int __R) { return (__m256i) __builtin_ia32_cvttpd2uqqs256_mask_round ((__v4df) __A, (__v4di) __W, @@ -1174,7 +1174,7 @@ _mm256_cvtts_roundps_epi32 (__m256 __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_cvtts_roundps_epi32 (__m256i __W, __mmask8 __U, __m256 __A, - const int __R) + const int __R) { return (__m256i) __builtin_ia32_cvttps2dqs256_mask_round ((__v8sf) __A, (__v8si) __W, @@ -1209,7 +1209,7 @@ _mm256_cvtts_roundps_epi64 (__m128 __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_cvtts_roundps_epi64 (__m256i __W, __mmask8 __U, __m128 __A, - const int __R) + const int __R) { return (__m256i) __builtin_ia32_cvttps2qqs256_mask_round ((__v4sf) __A, (__v4di) __W, @@ -1244,7 +1244,7 @@ _mm256_cvtts_roundps_epu32 (__m256 __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_cvtts_roundps_epu32 (__m256i __W, __mmask8 
__U, __m256 __A, - const int __R) + const int __R) { return (__m256i) __builtin_ia32_cvttps2udqs256_mask_round ((__v8sf) __A, (__v8si) __W, @@ -1279,7 +1279,7 @@ _mm256_cvtts_roundps_epu64 (__m128 __A, const int __R) extern __inline __m256i __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) _mm256_mask_cvtts_roundps_epu64 (__m256i __W, __mmask8 __U, __m128 __A, - const int __R) + const int __R) { return (__m256i) __builtin_ia32_cvttps2uqqs256_mask_round ((__v4sf) __A, (__v4di) __W, @@ -1332,7 +1332,7 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) } #else -#define _mm256_ipcvt_roundph_epi16(A, R) \ +#define _mm256_ipcvt_roundph_epi8(A, R) \ ((__m256i) \ __builtin_ia32_cvtph2ibs256_mask_round ((__v16hf) (A), \ (__v16hi) \ @@ -1340,13 +1340,13 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask16) (-1), \ (R))) -#define _mm256_mask_ipcvt_roundph_epi16(W, U, A, R) \ +#define _mm256_mask_ipcvt_roundph_epi8(W, U, A, R) \ ((__m256i) __builtin_ia32_cvtph2ibs256_mask_round ((__v16hf) (A), \ (__v16hi) (W), \ (__mmask16) (U), \ (R))) -#define _mm256_maskz_ipcvt_roundph_epi16(U, A, R) \ +#define _mm256_maskz_ipcvt_roundph_epi8(U, A, R) \ ((__m256i) \ __builtin_ia32_cvtph2ibs256_mask_round ((__v16hf) (A), \ (__v16hi) \ @@ -1354,7 +1354,7 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask16) (U), \ (R))) -#define _mm256_ipcvt_roundph_epu16(A, R) \ +#define _mm256_ipcvt_roundph_epu8(A, R) \ ((__m256i) \ __builtin_ia32_cvtph2iubs256_mask_round ((__v16hf) (A), \ (__v16hi) \ @@ -1362,13 +1362,13 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask16) (-1), \ (R))) -#define _mm256_mask_ipcvt_roundph_epu16(W, U, A, R) \ +#define _mm256_mask_ipcvt_roundph_epu8(W, U, A, R) \ ((__m256i) __builtin_ia32_cvtph2iubs256_mask_round ((__v16hf) (A), \ (__v16hi) (W), \ (__mmask16) (U), \ (R))) -#define _mm256_maskz_ipcvt_roundph_epu16(U, A, R) \ +#define _mm256_maskz_ipcvt_roundph_epu8(U, A, R) \ ((__m256i) \ 
__builtin_ia32_cvtph2iubs256_mask_round ((__v16hf) (A), \ (__v16hi) \ @@ -1376,7 +1376,7 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask16) (U), \ (R))) -#define _mm256_ipcvt_roundps_epi32(A, R) \ +#define _mm256_ipcvt_roundps_epi8(A, R) \ ((__m256i) \ __builtin_ia32_cvtps2ibs256_mask_round ((__v8sf) (A), \ (__v8si) \ @@ -1384,13 +1384,13 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask8) (-1), \ (R))) -#define _mm256_mask_ipcvt_roundps_epi32(W, U, A, R) \ +#define _mm256_mask_ipcvt_roundps_epi8(W, U, A, R) \ ((__m256i) __builtin_ia32_cvtps2ibs256_mask_round ((__v8sf) (A), \ (__v8si) (W), \ (__mmask8) (U), \ (R))) -#define _mm256_maskz_ipcvt_roundps_epi32(U, A, R) \ +#define _mm256_maskz_ipcvt_roundps_epi8(U, A, R) \ ((__m256i) \ __builtin_ia32_cvtps2ibs256_mask_round ((__v8sf) (A), \ (__v8si) \ @@ -1398,7 +1398,7 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask8) (U), \ (R))) -#define _mm256_ipcvt_roundps_epu32(A, R) \ +#define _mm256_ipcvt_roundps_epu8(A, R) \ ((__m256i) \ __builtin_ia32_cvtps2iubs256_mask_round ((__v8sf) (A), \ (__v8si) \ @@ -1406,13 +1406,13 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask8) (-1), \ (R))) -#define _mm256_mask_ipcvt_roundps_epu32(W, U, A, R) \ +#define _mm256_mask_ipcvt_roundps_epu8(W, U, A, R) \ ((__m256i) __builtin_ia32_cvtps2iubs256_mask_round ((__v8sf) (A), \ (__v8si) (W), \ (__mmask8) (U), \ (R))) -#define _mm256_maskz_ipcvt_roundps_epu32(U, A, R) \ +#define _mm256_maskz_ipcvt_roundps_epu8(U, A, R) \ ((__m256i) \ __builtin_ia32_cvtps2iubs256_mask_round ((__v8sf) (A), \ (__v8si) \ @@ -1420,7 +1420,7 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask8) (U), \ (R))) -#define _mm256_ipcvtt_roundph_epi16(A, R) \ +#define _mm256_ipcvtt_roundph_epi8(A, R) \ ((__m256i) \ __builtin_ia32_cvttph2ibs256_mask_round ((__v16hf) (A), \ (__v16hi) \ @@ -1428,13 +1428,13 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask16) (-1), \ (R))) -#define 
_mm256_mask_ipcvtt_roundph_epi16(W, U, A, R) \ +#define _mm256_mask_ipcvtt_roundph_epi8(W, U, A, R) \ ((__m256i) __builtin_ia32_cvttph2ibs256_mask_round ((__v16hf) (A), \ (__v16hi) (W), \ (__mmask16) (U), \ (R))) -#define _mm256_maskz_ipcvtt_roundph_epi16(U, A, R) \ +#define _mm256_maskz_ipcvtt_roundph_epi8(U, A, R) \ ((__m256i) \ __builtin_ia32_cvttph2ibs256_mask_round ((__v16hf) (A), \ (__v16hi) \ @@ -1442,7 +1442,7 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask16) (U), \ (R))) -#define _mm256_ipcvtt_roundph_epu16(A, R) \ +#define _mm256_ipcvtt_roundph_epu8(A, R) \ ((__m256i) \ __builtin_ia32_cvttph2iubs256_mask_round ((__v16hf) (A), \ (__v16hi) \ @@ -1450,13 +1450,13 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask16) (-1), \ (R))) -#define _mm256_mask_ipcvtt_roundph_epu16(W, U, A, R) \ +#define _mm256_mask_ipcvtt_roundph_epu8(W, U, A, R) \ ((__m256i) __builtin_ia32_cvttph2iubs256_mask_round ((__v16hf) (A), \ (__v16hi) (W), \ (__mmask16) (U), \ (R))) -#define _mm256_maskz_ipcvtt_roundph_epu16(U, A, R) \ +#define _mm256_maskz_ipcvtt_roundph_epu8(U, A, R) \ ((__m256i) \ __builtin_ia32_cvttph2iubs256_mask_round ((__v16hf) (A), \ (__v16hi) \ @@ -1464,7 +1464,7 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask16) (U), \ (R))) -#define _mm256_ipcvtt_roundps_epi32(A, R) \ +#define _mm256_ipcvtt_roundps_epi8(A, R) \ ((__m256i) \ __builtin_ia32_cvttps2ibs256_mask_round ((__v8sf) (A), \ (__v8si) \ @@ -1472,13 +1472,13 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask8) (-1), \ (R))) -#define _mm256_mask_ipcvtt_roundps_epi32(W, U, A, R) \ +#define _mm256_mask_ipcvtt_roundps_epi8(W, U, A, R) \ ((__m256i) __builtin_ia32_cvttps2ibs256_mask_round ((__v8sf) (A), \ (__v8si) (W), \ (__mmask8) (U), \ (R))) -#define _mm256_maskz_ipcvtt_roundps_epi32(U, A, R) \ +#define _mm256_maskz_ipcvtt_roundps_epi8(U, A, R) \ ((__m256i) \ __builtin_ia32_cvttps2ibs256_mask_round ((__v8sf) (A), \ (__v8si) \ @@ -1486,7 +1486,7 @@ 
_mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask8) (U), \ (R))) -#define _mm256_ipcvtt_roundps_epu32(A, R) \ +#define _mm256_ipcvtt_roundps_epu8(A, R) \ ((__m256i) \ __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) (A), \ (__v8si) \ @@ -1494,19 +1494,19 @@ _mm_cvtts_roundss_epu32 (__m128 __A, const int __R) (__mmask8) (-1), \ (R))) -#define _mm256_mask_ipcvtt_roundps_epu32(W, U, A, R) \ +#define _mm256_mask_ipcvtt_roundps_epu8(W, U, A, R) \ ((__m256i) __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) (A), \ (__v8si) (W), \ (__mmask8) (U), \ (R))) -#define _mm256_maskz_ipcvtt_roundps_epu32(U, A, R) \ -((__m256i) \ - __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) (A), \ - (__v8si) \ - (_mm256_setzero_si256 ()), \ - (__mmask8) (U), \ - (R))) +#define _mm256_maskz_ipcvtt_roundps_epu8(U, A, R) \ + ((__m256i) \ + __builtin_ia32_cvttps2iubs256_mask_round ((__v8sf) (A), \ + (__v8si) \ + (_mm256_setzero_si256 ()), \ + (__mmask8) (U), \ + (R))) #define _mm256_cvtts_roundpd_epi32(A, R) \ ((__m128i) \ diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-satcvt-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-satcvt-1.c index 341d258d24a..7f2f7caf4db 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-satcvt-1.c +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-satcvt-1.c @@ -77,53 +77,53 @@ volatile __mmask32 m32; void extern avx10_2_test (void) { - xi = _mm512_ipcvt_roundph_epi16 (xh, 4); - xi = _mm512_mask_ipcvt_roundph_epi16 (xi, m32, xh, 8); - xi = _mm512_maskz_ipcvt_roundph_epi16 (m32, xh, 11); + xi = _mm512_ipcvt_roundph_epi8 (xh, 4); + xi = _mm512_mask_ipcvt_roundph_epi8 (xi, m32, xh, 8); + xi = _mm512_maskz_ipcvt_roundph_epi8 (m32, xh, 11); - xi = _mm512_ipcvt_roundph_epu16 (xh, 4); - xi = _mm512_mask_ipcvt_roundph_epu16 (xi, m32, xh, 8); - xi = _mm512_maskz_ipcvt_roundph_epu16 (m32, xh, 11); + xi = _mm512_ipcvt_roundph_epu8 (xh, 4); + xi = _mm512_mask_ipcvt_roundph_epu8 (xi, m32, xh, 8); + xi = _mm512_maskz_ipcvt_roundph_epu8 (m32, xh, 11); - xi = 
_mm512_ipcvtt_roundph_epi16 (xh, 4); - xi = _mm512_mask_ipcvtt_roundph_epi16 (xi, m32, xh, 8); - xi = _mm512_maskz_ipcvtt_roundph_epi16 (m32, xh, 8); + xi = _mm512_ipcvtt_roundph_epi8 (xh, 4); + xi = _mm512_mask_ipcvtt_roundph_epi8 (xi, m32, xh, 8); + xi = _mm512_maskz_ipcvtt_roundph_epi8 (m32, xh, 8); - xi = _mm512_ipcvtt_roundph_epu16 (xh, 4); - xi = _mm512_mask_ipcvtt_roundph_epu16 (xi, m32, xh, 8); - xi = _mm512_maskz_ipcvtt_roundph_epu16 (m32, xh, 8); + xi = _mm512_ipcvtt_roundph_epu8 (xh, 4); + xi = _mm512_mask_ipcvtt_roundph_epu8 (xi, m32, xh, 8); + xi = _mm512_maskz_ipcvtt_roundph_epu8 (m32, xh, 8); - xi = _mm512_ipcvt_roundps_epi32 (x, 4); - xi = _mm512_mask_ipcvt_roundps_epi32 (xi, m16, x, 8); - xi = _mm512_maskz_ipcvt_roundps_epi32 (m16, x, 11); + xi = _mm512_ipcvt_roundps_epi8 (x, 4); + xi = _mm512_mask_ipcvt_roundps_epi8 (xi, m16, x, 8); + xi = _mm512_maskz_ipcvt_roundps_epi8 (m16, x, 11); - xi = _mm512_ipcvt_roundps_epu32 (x, 4); - xi = _mm512_mask_ipcvt_roundps_epu32 (xi, m16, x, 8); - xi = _mm512_maskz_ipcvt_roundps_epu32 (m16, x, 11); + xi = _mm512_ipcvt_roundps_epu8 (x, 4); + xi = _mm512_mask_ipcvt_roundps_epu8 (xi, m16, x, 8); + xi = _mm512_maskz_ipcvt_roundps_epu8 (m16, x, 11); - xi = _mm512_ipcvtt_roundps_epi32 (x, 4); - xi = _mm512_mask_ipcvtt_roundps_epi32 (xi, m16, x, 8); - xi = _mm512_maskz_ipcvtt_roundps_epi32 (m16, x, 8); + xi = _mm512_ipcvtt_roundps_epi8 (x, 4); + xi = _mm512_mask_ipcvtt_roundps_epi8 (xi, m16, x, 8); + xi = _mm512_maskz_ipcvtt_roundps_epi8 (m16, x, 8); - xi = _mm512_ipcvtt_roundps_epu32 (x, 4); - xi = _mm512_mask_ipcvtt_roundps_epu32 (xi, m16, x, 8); - xi = _mm512_maskz_ipcvtt_roundps_epu32 (m16, x, 8); + xi = _mm512_ipcvtt_roundps_epu8 (x, 4); + xi = _mm512_mask_ipcvtt_roundps_epu8 (xi, m16, x, 8); + xi = _mm512_maskz_ipcvtt_roundps_epu8 (m16, x, 8); - xi = _mm512_ipcvtbf16_epi16 (xbh); - xi = _mm512_mask_ipcvtbf16_epi16 (xi, m32, xbh); - xi = _mm512_maskz_ipcvtbf16_epi16 (m32, xbh); + xi = _mm512_ipcvtbf16_epi8 (xbh); 
+ xi = _mm512_mask_ipcvtbf16_epi8 (xi, m32, xbh); + xi = _mm512_maskz_ipcvtbf16_epi8 (m32, xbh); - xi = _mm512_ipcvtbf16_epu16 (xbh); - xi = _mm512_mask_ipcvtbf16_epu16 (xi, m32, xbh); - xi = _mm512_maskz_ipcvtbf16_epu16 (m32, xbh); + xi = _mm512_ipcvtbf16_epu8 (xbh); + xi = _mm512_mask_ipcvtbf16_epu8 (xi, m32, xbh); + xi = _mm512_maskz_ipcvtbf16_epu8 (m32, xbh); - xi = _mm512_ipcvttbf16_epi16 (xbh); - xi = _mm512_mask_ipcvttbf16_epi16 (xi, m32, xbh); - xi = _mm512_maskz_ipcvttbf16_epi16 (m32, xbh); + xi = _mm512_ipcvttbf16_epi8 (xbh); + xi = _mm512_mask_ipcvttbf16_epi8 (xi, m32, xbh); + xi = _mm512_maskz_ipcvttbf16_epi8 (m32, xbh); - xi = _mm512_ipcvttbf16_epu16 (xbh); - xi = _mm512_mask_ipcvttbf16_epu16 (xi, m32, xbh); - xi = _mm512_maskz_ipcvttbf16_epu16 (m32, xbh); + xi = _mm512_ipcvttbf16_epu8 (xbh); + xi = _mm512_mask_ipcvttbf16_epu8 (xi, m32, xbh); + xi = _mm512_maskz_ipcvttbf16_epu8 (m32, xbh); hxi = _mm512_cvtts_roundpd_epi32 (xd, 8); hxi = _mm512_mask_cvtts_roundpd_epi32 (hxi, m8, xd, 8); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbf162ibs-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbf162ibs-2.c index 73c67226394..35dde47b3a9 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbf162ibs-2.c +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbf162ibs-2.c @@ -50,9 +50,9 @@ TEST (void) for (i = 0; i < SIZE; i++) res2.a[i] = DEFAULT_VALUE; - res1.x = INTRINSIC (_ipcvtbf16_epi16) (s.x); - res2.x = INTRINSIC (_mask_ipcvtbf16_epi16) (res2.x, mask, s.x); - res3.x = INTRINSIC (_maskz_ipcvtbf16_epi16) (mask, s.x); + res1.x = INTRINSIC (_ipcvtbf16_epi8) (s.x); + res2.x = INTRINSIC (_mask_ipcvtbf16_epi8) (res2.x, mask, s.x); + res3.x = INTRINSIC (_maskz_ipcvtbf16_epi8) (mask, s.x); CALC (s.a, res_ref); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbf162iubs-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbf162iubs-2.c index 59ed049dba6..9a1125fca86 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbf162iubs-2.c 
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbf162iubs-2.c @@ -50,9 +50,9 @@ TEST (void) for (i = 0; i < SIZE; i++) res2.a[i] = DEFAULT_VALUE; - res1.x = INTRINSIC (_ipcvtbf16_epu16) (s.x); - res2.x = INTRINSIC (_mask_ipcvtbf16_epu16) (res2.x, mask, s.x); - res3.x = INTRINSIC (_maskz_ipcvtbf16_epu16) (mask, s.x); + res1.x = INTRINSIC (_ipcvtbf16_epu8) (s.x); + res2.x = INTRINSIC (_mask_ipcvtbf16_epu8) (res2.x, mask, s.x); + res3.x = INTRINSIC (_maskz_ipcvtbf16_epu8) (mask, s.x); CALC (s.a, res_ref); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2ibs-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2ibs-2.c index 47d2a341af8..d8819f7d2c7 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2ibs-2.c +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2ibs-2.c @@ -50,13 +50,13 @@ TEST (void) res2.a[i] = DEFAULT_VALUE; #if AVX512F_LEN == 128 - res1.x = INTRINSIC (_ipcvtph_epi16) (s.x); - res2.x = INTRINSIC (_mask_ipcvtph_epi16) (res2.x, mask, s.x); - res3.x = INTRINSIC (_maskz_ipcvtph_epi16) (mask, s.x); + res1.x = INTRINSIC (_ipcvtph_epi8) (s.x); + res2.x = INTRINSIC (_mask_ipcvtph_epi8) (res2.x, mask, s.x); + res3.x = INTRINSIC (_maskz_ipcvtph_epi8) (mask, s.x); #else - res1.x = INTRINSIC (_ipcvt_roundph_epi16) (s.x, 8); - res2.x = INTRINSIC (_mask_ipcvt_roundph_epi16) (res2.x, mask, s.x, 8); - res3.x = INTRINSIC (_maskz_ipcvt_roundph_epi16) (mask, s.x, 8); + res1.x = INTRINSIC (_ipcvt_roundph_epi8) (s.x, 8); + res2.x = INTRINSIC (_mask_ipcvt_roundph_epi8) (res2.x, mask, s.x, 8); + res3.x = INTRINSIC (_maskz_ipcvt_roundph_epi8) (mask, s.x, 8); #endif CALC (s.a, res_ref); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2iubs-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2iubs-2.c index ef008527f02..877f6f5e2bd 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2iubs-2.c +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2iubs-2.c @@ -50,13 +50,13 @@ TEST (void) res2.a[i] = DEFAULT_VALUE; #if 
AVX512F_LEN == 128 - res1.x = INTRINSIC (_ipcvtph_epu16) (s.x); - res2.x = INTRINSIC (_mask_ipcvtph_epu16) (res2.x, mask, s.x); - res3.x = INTRINSIC (_maskz_ipcvtph_epu16) (mask, s.x); + res1.x = INTRINSIC (_ipcvtph_epu8) (s.x); + res2.x = INTRINSIC (_mask_ipcvtph_epu8) (res2.x, mask, s.x); + res3.x = INTRINSIC (_maskz_ipcvtph_epu8) (mask, s.x); #else - res1.x = INTRINSIC (_ipcvt_roundph_epu16) (s.x, 8); - res2.x = INTRINSIC (_mask_ipcvt_roundph_epu16) (res2.x, mask, s.x, 8); - res3.x = INTRINSIC (_maskz_ipcvt_roundph_epu16) (mask, s.x, 8); + res1.x = INTRINSIC (_ipcvt_roundph_epu8) (s.x, 8); + res2.x = INTRINSIC (_mask_ipcvt_roundph_epu8) (res2.x, mask, s.x, 8); + res3.x = INTRINSIC (_maskz_ipcvt_roundph_epu8) (mask, s.x, 8); #endif CALC (s.a, res_ref); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtps2ibs-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtps2ibs-2.c index b11ab1b77d6..3ebc9e79e38 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtps2ibs-2.c +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtps2ibs-2.c @@ -51,13 +51,13 @@ TEST (void) res2.a[i] = DEFAULT_VALUE; #if AVX512F_LEN == 128 - res1.x = INTRINSIC (_ipcvtps_epi32) (s.x); - res2.x = INTRINSIC (_mask_ipcvtps_epi32) (res2.x, mask, s.x); - res3.x = INTRINSIC (_maskz_ipcvtps_epi32) (mask, s.x); + res1.x = INTRINSIC (_ipcvtps_epi8) (s.x); + res2.x = INTRINSIC (_mask_ipcvtps_epi8) (res2.x, mask, s.x); + res3.x = INTRINSIC (_maskz_ipcvtps_epi8) (mask, s.x); #else - res1.x = INTRINSIC (_ipcvt_roundps_epi32) (s.x, 8); - res2.x = INTRINSIC (_mask_ipcvt_roundps_epi32) (res2.x, mask, s.x, 8); - res3.x = INTRINSIC (_maskz_ipcvt_roundps_epi32) (mask, s.x, 8); + res1.x = INTRINSIC (_ipcvt_roundps_epi8) (s.x, 8); + res2.x = INTRINSIC (_mask_ipcvt_roundps_epi8) (res2.x, mask, s.x, 8); + res3.x = INTRINSIC (_maskz_ipcvt_roundps_epi8) (mask, s.x, 8); #endif CALC (s.a, res_ref); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtps2iubs-2.c 
b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtps2iubs-2.c index 33019c1e765..9e75076b5da 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtps2iubs-2.c +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtps2iubs-2.c @@ -49,13 +49,13 @@ TEST (void) res2.a[i] = DEFAULT_VALUE; #if AVX512F_LEN == 128 - res1.x = INTRINSIC (_ipcvtps_epu32) (s.x); - res2.x = INTRINSIC (_mask_ipcvtps_epu32) (res2.x, mask, s.x); - res3.x = INTRINSIC (_maskz_ipcvtps_epu32) (mask, s.x); + res1.x = INTRINSIC (_ipcvtps_epu8) (s.x); + res2.x = INTRINSIC (_mask_ipcvtps_epu8) (res2.x, mask, s.x); + res3.x = INTRINSIC (_maskz_ipcvtps_epu8) (mask, s.x); #else - res1.x = INTRINSIC (_ipcvt_roundps_epu32) (s.x, 8); - res2.x = INTRINSIC (_mask_ipcvt_roundps_epu32) (res2.x, mask, s.x, 8); - res3.x = INTRINSIC (_maskz_ipcvt_roundps_epu32) (mask, s.x, 8); + res1.x = INTRINSIC (_ipcvt_roundps_epu8) (s.x, 8); + res2.x = INTRINSIC (_mask_ipcvt_roundps_epu8) (res2.x, mask, s.x, 8); + res3.x = INTRINSIC (_maskz_ipcvt_roundps_epu8) (mask, s.x, 8); #endif CALC (s.a, res_ref); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttbf162ibs-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttbf162ibs-2.c index 47688f5c270..b91d95145b1 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttbf162ibs-2.c +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttbf162ibs-2.c @@ -50,9 +50,9 @@ TEST (void) for (i = 0; i < SIZE; i++) res2.a[i] = DEFAULT_VALUE; - res1.x = INTRINSIC (_ipcvttbf16_epi16) (s.x); - res2.x = INTRINSIC (_mask_ipcvttbf16_epi16) (res2.x, mask, s.x); - res3.x = INTRINSIC (_maskz_ipcvttbf16_epi16) (mask, s.x); + res1.x = INTRINSIC (_ipcvttbf16_epi8) (s.x); + res2.x = INTRINSIC (_mask_ipcvttbf16_epi8) (res2.x, mask, s.x); + res3.x = INTRINSIC (_maskz_ipcvttbf16_epi8) (mask, s.x); CALC (s.a, res_ref); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttbf162iubs-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttbf162iubs-2.c index 97cb11b04b9..e648ca2bce9 100644 --- 
a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttbf162iubs-2.c +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttbf162iubs-2.c @@ -50,9 +50,9 @@ TEST (void) for (i = 0; i < SIZE; i++) res2.a[i] = DEFAULT_VALUE; - res1.x = INTRINSIC (_ipcvttbf16_epu16) (s.x); - res2.x = INTRINSIC (_mask_ipcvttbf16_epu16) (res2.x, mask, s.x); - res3.x = INTRINSIC (_maskz_ipcvttbf16_epu16) (mask, s.x); + res1.x = INTRINSIC (_ipcvttbf16_epu8) (s.x); + res2.x = INTRINSIC (_mask_ipcvttbf16_epu8) (res2.x, mask, s.x); + res3.x = INTRINSIC (_maskz_ipcvttbf16_epu8) (mask, s.x); CALC (s.a, res_ref); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttph2ibs-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttph2ibs-2.c index 711572730fe..6e91c75e9e5 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttph2ibs-2.c +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttph2ibs-2.c @@ -50,13 +50,13 @@ TEST (void) res2.a[i] = DEFAULT_VALUE; #if AVX512F_LEN == 128 - res1.x = INTRINSIC (_ipcvttph_epi16) (s.x); - res2.x = INTRINSIC (_mask_ipcvttph_epi16) (res2.x, mask, s.x); - res3.x = INTRINSIC (_maskz_ipcvttph_epi16) (mask, s.x); + res1.x = INTRINSIC (_ipcvttph_epi8) (s.x); + res2.x = INTRINSIC (_mask_ipcvttph_epi8) (res2.x, mask, s.x); + res3.x = INTRINSIC (_maskz_ipcvttph_epi8) (mask, s.x); #else - res1.x = INTRINSIC (_ipcvtt_roundph_epi16) (s.x, 8); - res2.x = INTRINSIC (_mask_ipcvtt_roundph_epi16) (res2.x, mask, s.x, 8); - res3.x = INTRINSIC (_maskz_ipcvtt_roundph_epi16) (mask, s.x, 8); + res1.x = INTRINSIC (_ipcvtt_roundph_epi8) (s.x, 8); + res2.x = INTRINSIC (_mask_ipcvtt_roundph_epi8) (res2.x, mask, s.x, 8); + res3.x = INTRINSIC (_maskz_ipcvtt_roundph_epi8) (mask, s.x, 8); #endif CALC (s.a, res_ref); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttph2iubs-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttph2iubs-2.c index 00da5113d33..07f9c127518 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttph2iubs-2.c +++ 
b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttph2iubs-2.c @@ -50,13 +50,13 @@ TEST (void) res2.a[i] = DEFAULT_VALUE; #if AVX512F_LEN == 128 - res1.x = INTRINSIC (_ipcvttph_epu16) (s.x); - res2.x = INTRINSIC (_mask_ipcvttph_epu16) (res2.x, mask, s.x); - res3.x = INTRINSIC (_maskz_ipcvttph_epu16) (mask, s.x); + res1.x = INTRINSIC (_ipcvttph_epu8) (s.x); + res2.x = INTRINSIC (_mask_ipcvttph_epu8) (res2.x, mask, s.x); + res3.x = INTRINSIC (_maskz_ipcvttph_epu8) (mask, s.x); #else - res1.x = INTRINSIC (_ipcvtt_roundph_epu16) (s.x, 8); - res2.x = INTRINSIC (_mask_ipcvtt_roundph_epu16) (res2.x, mask, s.x, 8); - res3.x = INTRINSIC (_maskz_ipcvtt_roundph_epu16) (mask, s.x, 8); + res1.x = INTRINSIC (_ipcvtt_roundph_epu8) (s.x, 8); + res2.x = INTRINSIC (_mask_ipcvtt_roundph_epu8) (res2.x, mask, s.x, 8); + res3.x = INTRINSIC (_maskz_ipcvtt_roundph_epu8) (mask, s.x, 8); #endif CALC (s.a, res_ref); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttps2ibs-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttps2ibs-2.c index bfe74d846e0..b7b10303cdd 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttps2ibs-2.c +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttps2ibs-2.c @@ -51,13 +51,13 @@ TEST (void) res2.a[i] = DEFAULT_VALUE; #if AVX512F_LEN == 128 - res1.x = INTRINSIC (_ipcvttps_epi32) (s.x); - res2.x = INTRINSIC (_mask_ipcvttps_epi32) (res2.x, mask, s.x); - res3.x = INTRINSIC (_maskz_ipcvttps_epi32) (mask, s.x); + res1.x = INTRINSIC (_ipcvttps_epi8) (s.x); + res2.x = INTRINSIC (_mask_ipcvttps_epi8) (res2.x, mask, s.x); + res3.x = INTRINSIC (_maskz_ipcvttps_epi8) (mask, s.x); #else - res1.x = INTRINSIC (_ipcvtt_roundps_epi32) (s.x, 8); - res2.x = INTRINSIC (_mask_ipcvtt_roundps_epi32) (res2.x, mask, s.x, 8); - res3.x = INTRINSIC (_maskz_ipcvtt_roundps_epi32) (mask, s.x, 8); + res1.x = INTRINSIC (_ipcvtt_roundps_epi8) (s.x, 8); + res2.x = INTRINSIC (_mask_ipcvtt_roundps_epi8) (res2.x, mask, s.x, 8); + res3.x = INTRINSIC (_maskz_ipcvtt_roundps_epi8) 
(mask, s.x, 8); #endif CALC (s.a, res_ref); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttps2iubs-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttps2iubs-2.c index 9de4f2a791a..f689ab7eece 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttps2iubs-2.c +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvttps2iubs-2.c @@ -49,13 +49,13 @@ TEST (void) res2.a[i] = DEFAULT_VALUE; #if AVX512F_LEN == 128 - res1.x = INTRINSIC (_ipcvttps_epu32) (s.x); - res2.x = INTRINSIC (_mask_ipcvttps_epu32) (res2.x, mask, s.x); - res3.x = INTRINSIC (_maskz_ipcvttps_epu32) (mask, s.x); + res1.x = INTRINSIC (_ipcvttps_epu8) (s.x); + res2.x = INTRINSIC (_mask_ipcvttps_epu8) (res2.x, mask, s.x); + res3.x = INTRINSIC (_maskz_ipcvttps_epu8) (mask, s.x); #else - res1.x = INTRINSIC (_ipcvtt_roundps_epu32) (s.x, 8); - res2.x = INTRINSIC (_mask_ipcvtt_roundps_epu32) (res2.x, mask, s.x, 8); - res3.x = INTRINSIC (_maskz_ipcvtt_roundps_epu32) (mask, s.x, 8); + res1.x = INTRINSIC (_ipcvtt_roundps_epu8) (s.x, 8); + res2.x = INTRINSIC (_mask_ipcvtt_roundps_epu8) (res2.x, mask, s.x, 8); + res3.x = INTRINSIC (_maskz_ipcvtt_roundps_epu8) (mask, s.x, 8); #endif CALC (s.a, res_ref); diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-satcvt-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-satcvt-1.c index 7599fd1e21f..12649e62809 100644 --- a/gcc/testsuite/gcc.target/i386/avx10_2-satcvt-1.c +++ b/gcc/testsuite/gcc.target/i386/avx10_2-satcvt-1.c @@ -151,101 +151,101 @@ volatile unsigned long long ull; void extern avx10_2_test (void) { - xi = _mm256_ipcvt_roundph_epi16 (xh, 4); - xi = _mm256_mask_ipcvt_roundph_epi16 (xi, m16, xh, 8); - xi = _mm256_maskz_ipcvt_roundph_epi16 (m16, xh, 11); + xi = _mm256_ipcvt_roundph_epi8 (xh, 4); + xi = _mm256_mask_ipcvt_roundph_epi8 (xi, m16, xh, 8); + xi = _mm256_maskz_ipcvt_roundph_epi8 (m16, xh, 11); - xi = _mm256_ipcvt_roundph_epu16 (xh, 4); - xi = _mm256_mask_ipcvt_roundph_epu16 (xi, m16, xh, 8); - xi = _mm256_maskz_ipcvt_roundph_epu16 (m16, xh, 11); 
+ xi = _mm256_ipcvt_roundph_epu8 (xh, 4); + xi = _mm256_mask_ipcvt_roundph_epu8 (xi, m16, xh, 8); + xi = _mm256_maskz_ipcvt_roundph_epu8 (m16, xh, 11); - xi = _mm256_ipcvtt_roundph_epi16 (xh, 4); - xi = _mm256_mask_ipcvtt_roundph_epi16 (xi, m16, xh, 8); - xi = _mm256_maskz_ipcvtt_roundph_epi16 (m16, xh, 8); + xi = _mm256_ipcvtt_roundph_epi8 (xh, 4); + xi = _mm256_mask_ipcvtt_roundph_epi8 (xi, m16, xh, 8); + xi = _mm256_maskz_ipcvtt_roundph_epi8 (m16, xh, 8); - xi = _mm256_ipcvtt_roundph_epu16 (xh, 4); - xi = _mm256_mask_ipcvtt_roundph_epu16 (xi, m16, xh, 8); - xi = _mm256_maskz_ipcvtt_roundph_epu16 (m16, xh, 8); + xi = _mm256_ipcvtt_roundph_epu8 (xh, 4); + xi = _mm256_mask_ipcvtt_roundph_epu8 (xi, m16, xh, 8); + xi = _mm256_maskz_ipcvtt_roundph_epu8 (m16, xh, 8); - xi = _mm256_ipcvt_roundps_epi32 (x, 4); - xi = _mm256_mask_ipcvt_roundps_epi32 (xi, m8, x, 8); - xi = _mm256_maskz_ipcvt_roundps_epi32 (m8, x, 11); + xi = _mm256_ipcvt_roundps_epi8 (x, 4); + xi = _mm256_mask_ipcvt_roundps_epi8 (xi, m8, x, 8); + xi = _mm256_maskz_ipcvt_roundps_epi8 (m8, x, 11); - xi = _mm256_ipcvt_roundps_epu32 (x, 4); - xi = _mm256_mask_ipcvt_roundps_epu32 (xi, m8, x, 8); - xi = _mm256_maskz_ipcvt_roundps_epu32 (m8, x, 11); + xi = _mm256_ipcvt_roundps_epu8 (x, 4); + xi = _mm256_mask_ipcvt_roundps_epu8 (xi, m8, x, 8); + xi = _mm256_maskz_ipcvt_roundps_epu8 (m8, x, 11); - xi = _mm256_ipcvtt_roundps_epi32 (x, 4); - xi = _mm256_mask_ipcvtt_roundps_epi32 (xi, m8, x, 8); - xi = _mm256_maskz_ipcvtt_roundps_epi32 (m8, x, 8); + xi = _mm256_ipcvtt_roundps_epi8 (x, 4); + xi = _mm256_mask_ipcvtt_roundps_epi8 (xi, m8, x, 8); + xi = _mm256_maskz_ipcvtt_roundps_epi8 (m8, x, 8); - xi = _mm256_ipcvtt_roundps_epu32 (x, 4); - xi = _mm256_mask_ipcvtt_roundps_epu32 (xi, m8, x, 8); - xi = _mm256_maskz_ipcvtt_roundps_epu32 (m8, x, 8); + xi = _mm256_ipcvtt_roundps_epu8 (x, 4); + xi = _mm256_mask_ipcvtt_roundps_epu8 (xi, m8, x, 8); + xi = _mm256_maskz_ipcvtt_roundps_epu8 (m8, x, 8); - xi = _mm256_ipcvtbf16_epi16 
(xbh); - xi = _mm256_mask_ipcvtbf16_epi16 (xi, m16, xbh); - xi = _mm256_maskz_ipcvtbf16_epi16 (m16, xbh); + xi = _mm256_ipcvtbf16_epi8 (xbh); + xi = _mm256_mask_ipcvtbf16_epi8 (xi, m16, xbh); + xi = _mm256_maskz_ipcvtbf16_epi8 (m16, xbh); - xi = _mm256_ipcvtbf16_epu16 (xbh); - xi = _mm256_mask_ipcvtbf16_epu16 (xi, m16, xbh); - xi = _mm256_maskz_ipcvtbf16_epu16 (m16, xbh); + xi = _mm256_ipcvtbf16_epu8 (xbh); + xi = _mm256_mask_ipcvtbf16_epu8 (xi, m16, xbh); + xi = _mm256_maskz_ipcvtbf16_epu8 (m16, xbh); - xi = _mm256_ipcvttbf16_epi16 (xbh); - xi = _mm256_mask_ipcvttbf16_epi16 (xi, m16, xbh); - xi = _mm256_maskz_ipcvttbf16_epi16 (m16, xbh); + xi = _mm256_ipcvttbf16_epi8 (xbh); + xi = _mm256_mask_ipcvttbf16_epi8 (xi, m16, xbh); + xi = _mm256_maskz_ipcvttbf16_epi8 (m16, xbh); - xi = _mm256_ipcvttbf16_epu16 (xbh); - xi = _mm256_mask_ipcvttbf16_epu16 (xi, m16, xbh); - xi = _mm256_maskz_ipcvttbf16_epu16 (m16, xbh); + xi = _mm256_ipcvttbf16_epu8 (xbh); + xi = _mm256_mask_ipcvttbf16_epu8 (xi, m16, xbh); + xi = _mm256_maskz_ipcvttbf16_epu8 (m16, xbh); - hxi = _mm_ipcvtph_epi16 (hxh); - hxi = _mm_mask_ipcvtph_epi16 (hxi, m8, hxh); - hxi = _mm_maskz_ipcvtph_epi16 (m8, hxh); + hxi = _mm_ipcvtph_epi8 (hxh); + hxi = _mm_mask_ipcvtph_epi8 (hxi, m8, hxh); + hxi = _mm_maskz_ipcvtph_epi8 (m8, hxh); - hxi = _mm_ipcvtph_epu16 (hxh); - hxi = _mm_mask_ipcvtph_epu16 (hxi, m8, hxh); - hxi = _mm_maskz_ipcvtph_epu16 (m8, hxh); + hxi = _mm_ipcvtph_epu8 (hxh); + hxi = _mm_mask_ipcvtph_epu8 (hxi, m8, hxh); + hxi = _mm_maskz_ipcvtph_epu8 (m8, hxh); - hxi = _mm_ipcvttph_epi16 (hxh); - hxi = _mm_mask_ipcvttph_epi16 (hxi, m8, hxh); - hxi = _mm_maskz_ipcvttph_epi16 (m8, hxh); + hxi = _mm_ipcvttph_epi8 (hxh); + hxi = _mm_mask_ipcvttph_epi8 (hxi, m8, hxh); + hxi = _mm_maskz_ipcvttph_epi8 (m8, hxh); - hxi = _mm_ipcvttph_epu16 (hxh); - hxi = _mm_mask_ipcvttph_epu16 (hxi, m8, hxh); - hxi = _mm_maskz_ipcvttph_epu16 (m8, hxh); + hxi = _mm_ipcvttph_epu8 (hxh); + hxi = _mm_mask_ipcvttph_epu8 (hxi, m8, hxh); 
+ hxi = _mm_maskz_ipcvttph_epu8 (m8, hxh); - hxi = _mm_ipcvtps_epi32 (hx); - hxi = _mm_mask_ipcvtps_epi32 (hxi, m8, hx); - hxi = _mm_maskz_ipcvtps_epi32 (m8, hx); + hxi = _mm_ipcvtps_epi8 (hx); + hxi = _mm_mask_ipcvtps_epi8 (hxi, m8, hx); + hxi = _mm_maskz_ipcvtps_epi8 (m8, hx); - hxi = _mm_ipcvtps_epu32 (hx); - hxi = _mm_mask_ipcvtps_epu32 (hxi, m8, hx); - hxi = _mm_maskz_ipcvtps_epu32 (m8, hx); + hxi = _mm_ipcvtps_epu8 (hx); + hxi = _mm_mask_ipcvtps_epu8 (hxi, m8, hx); + hxi = _mm_maskz_ipcvtps_epu8 (m8, hx); - hxi = _mm_ipcvttps_epi32 (hx); - hxi = _mm_mask_ipcvttps_epi32 (hxi, m8, hx); - hxi = _mm_maskz_ipcvttps_epi32 (m8, hx); + hxi = _mm_ipcvttps_epi8 (hx); + hxi = _mm_mask_ipcvttps_epi8 (hxi, m8, hx); + hxi = _mm_maskz_ipcvttps_epi8 (m8, hx); - hxi = _mm_ipcvttps_epu32 (hx); - hxi = _mm_mask_ipcvttps_epu32 (hxi, m8, hx); - hxi = _mm_maskz_ipcvttps_epu32 (m8, hx); + hxi = _mm_ipcvttps_epu8 (hx); + hxi = _mm_mask_ipcvttps_epu8 (hxi, m8, hx); + hxi = _mm_maskz_ipcvttps_epu8 (m8, hx); - hxi = _mm_ipcvtbf16_epi16 (hxbh); - hxi = _mm_mask_ipcvtbf16_epi16 (hxi, m8, hxbh); - hxi = _mm_maskz_ipcvtbf16_epi16 (m8, hxbh); + hxi = _mm_ipcvtbf16_epi8 (hxbh); + hxi = _mm_mask_ipcvtbf16_epi8 (hxi, m8, hxbh); + hxi = _mm_maskz_ipcvtbf16_epi8 (m8, hxbh); - hxi = _mm_ipcvtbf16_epu16 (hxbh); - hxi = _mm_mask_ipcvtbf16_epu16 (hxi, m8, hxbh); - hxi = _mm_maskz_ipcvtbf16_epu16 (m8, hxbh); + hxi = _mm_ipcvtbf16_epu8 (hxbh); + hxi = _mm_mask_ipcvtbf16_epu8 (hxi, m8, hxbh); + hxi = _mm_maskz_ipcvtbf16_epu8 (m8, hxbh); - hxi = _mm_ipcvttbf16_epi16 (hxbh); - hxi = _mm_mask_ipcvttbf16_epi16 (hxi, m8, hxbh); - hxi = _mm_maskz_ipcvttbf16_epi16 (m8, hxbh); + hxi = _mm_ipcvttbf16_epi8 (hxbh); + hxi = _mm_mask_ipcvttbf16_epi8 (hxi, m8, hxbh); + hxi = _mm_maskz_ipcvttbf16_epi8 (m8, hxbh); - hxi = _mm_ipcvttbf16_epu16 (hxbh); - hxi = _mm_mask_ipcvttbf16_epu16 (hxi, m8, hxbh); - hxi = _mm_maskz_ipcvttbf16_epu16 (m8, hxbh); + hxi = _mm_ipcvttbf16_epu8 (hxbh); + hxi = _mm_mask_ipcvttbf16_epu8 
(hxi, m8, hxbh); + hxi = _mm_maskz_ipcvttbf16_epu8 (m8, hxbh); hxi = _mm256_cvtts_roundpd_epi32 (xd, 8); hxi = _mm256_mask_cvtts_roundpd_epi32 (hxi, m8, xd, 8); diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c index 16f03e260aa..840f296fb96 100644 --- a/gcc/testsuite/gcc.target/i386/sse-14.c +++ b/gcc/testsuite/gcc.target/i386/sse-14.c @@ -1433,30 +1433,30 @@ test_3 (_mm256_mask_cmp_pbh_mask, __mmask16, __mmask16, __m256bh, __m256bh, 1) test_3 (_mm_mask_cmp_pbh_mask, __mmask8, __mmask8, __m128bh, __m128bh, 1) /* avx10_2-512satcvtintrin.h */ -test_1 (_mm512_ipcvt_roundph_epi16, __m512i, __m512h, 8) -test_1 (_mm512_ipcvt_roundph_epu16, __m512i, __m512h, 8) -test_1 (_mm512_ipcvt_roundps_epi32, __m512i, __m512, 8) -test_1 (_mm512_ipcvt_roundps_epu32, __m512i, __m512, 8) -test_1 (_mm512_ipcvtt_roundph_epi16, __m512i, __m512h, 8) -test_1 (_mm512_ipcvtt_roundph_epu16, __m512i, __m512h, 8) -test_1 (_mm512_ipcvtt_roundps_epi32, __m512i, __m512, 8) -test_1 (_mm512_ipcvtt_roundps_epu32, __m512i, __m512, 8) -test_2 (_mm512_maskz_ipcvt_roundph_epi16, __m512i, __mmask32, __m512h, 8) -test_2 (_mm512_maskz_ipcvt_roundph_epu16, __m512i, __mmask32, __m512h, 8) -test_2 (_mm512_maskz_ipcvt_roundps_epi32, __m512i, __mmask16, __m512, 8) -test_2 (_mm512_maskz_ipcvt_roundps_epu32, __m512i, __mmask16, __m512, 8) -test_2 (_mm512_maskz_ipcvtt_roundph_epi16, __m512i, __mmask32, __m512h, 8) -test_2 (_mm512_maskz_ipcvtt_roundph_epu16, __m512i, __mmask32, __m512h, 8) -test_2 (_mm512_maskz_ipcvtt_roundps_epi32, __m512i, __mmask16, __m512, 8) -test_2 (_mm512_maskz_ipcvtt_roundps_epu32, __m512i, __mmask16, __m512, 8) -test_3 (_mm512_mask_ipcvt_roundph_epi16, __m512i, __m512i, __mmask32, __m512h, 8) -test_3 (_mm512_mask_ipcvt_roundph_epu16, __m512i, __m512i, __mmask32, __m512h, 8) -test_3 (_mm512_mask_ipcvt_roundps_epi32, __m512i, __m512i, __mmask16, __m512, 8) -test_3 (_mm512_mask_ipcvt_roundps_epu32, __m512i, __m512i, __mmask16, __m512, 8) -test_3 
(_mm512_mask_ipcvtt_roundph_epi16, __m512i, __m512i, __mmask32, __m512h, 8) -test_3 (_mm512_mask_ipcvtt_roundph_epu16, __m512i, __m512i, __mmask32, __m512h, 8) -test_3 (_mm512_mask_ipcvtt_roundps_epi32, __m512i, __m512i, __mmask16, __m512, 8) -test_3 (_mm512_mask_ipcvtt_roundps_epu32, __m512i, __m512i, __mmask16, __m512, 8) +test_1 (_mm512_ipcvt_roundph_epi8, __m512i, __m512h, 8) +test_1 (_mm512_ipcvt_roundph_epu8, __m512i, __m512h, 8) +test_1 (_mm512_ipcvt_roundps_epi8, __m512i, __m512, 8) +test_1 (_mm512_ipcvt_roundps_epu8, __m512i, __m512, 8) +test_1 (_mm512_ipcvtt_roundph_epi8, __m512i, __m512h, 8) +test_1 (_mm512_ipcvtt_roundph_epu8, __m512i, __m512h, 8) +test_1 (_mm512_ipcvtt_roundps_epi8, __m512i, __m512, 8) +test_1 (_mm512_ipcvtt_roundps_epu8, __m512i, __m512, 8) +test_2 (_mm512_maskz_ipcvt_roundph_epi8, __m512i, __mmask32, __m512h, 8) +test_2 (_mm512_maskz_ipcvt_roundph_epu8, __m512i, __mmask32, __m512h, 8) +test_2 (_mm512_maskz_ipcvt_roundps_epi8, __m512i, __mmask16, __m512, 8) +test_2 (_mm512_maskz_ipcvt_roundps_epu8, __m512i, __mmask16, __m512, 8) +test_2 (_mm512_maskz_ipcvtt_roundph_epi8, __m512i, __mmask32, __m512h, 8) +test_2 (_mm512_maskz_ipcvtt_roundph_epu8, __m512i, __mmask32, __m512h, 8) +test_2 (_mm512_maskz_ipcvtt_roundps_epi8, __m512i, __mmask16, __m512, 8) +test_2 (_mm512_maskz_ipcvtt_roundps_epu8, __m512i, __mmask16, __m512, 8) +test_3 (_mm512_mask_ipcvt_roundph_epi8, __m512i, __m512i, __mmask32, __m512h, 8) +test_3 (_mm512_mask_ipcvt_roundph_epu8, __m512i, __m512i, __mmask32, __m512h, 8) +test_3 (_mm512_mask_ipcvt_roundps_epi8, __m512i, __m512i, __mmask16, __m512, 8) +test_3 (_mm512_mask_ipcvt_roundps_epu8, __m512i, __m512i, __mmask16, __m512, 8) +test_3 (_mm512_mask_ipcvtt_roundph_epi8, __m512i, __m512i, __mmask32, __m512h, 8) +test_3 (_mm512_mask_ipcvtt_roundph_epu8, __m512i, __m512i, __mmask32, __m512h, 8) +test_3 (_mm512_mask_ipcvtt_roundps_epi8, __m512i, __m512i, __mmask16, __m512, 8) +test_3 (_mm512_mask_ipcvtt_roundps_epu8, __m512i, 
__m512i, __mmask16, __m512, 8) test_1 (_mm512_cvtts_roundpd_epi32, __m256i, __m512d, 8) test_2 (_mm512_maskz_cvtts_roundpd_epi32, __m256i, __mmask8, __m512d, 8) test_3 (_mm512_mask_cvtts_roundpd_epi32, __m256i, __m256i, __mmask8, __m512d, 8) @@ -1483,30 +1483,30 @@ test_2 (_mm512_maskz_cvtts_roundps_epu64, __m512i, __mmask8, __m256, 8) test_3 (_mm512_mask_cvtts_roundps_epu64, __m512i, __m512i, __mmask8, __m256, 8) /* avx10_2satcvtintrin.h */ -test_1 (_mm256_ipcvt_roundph_epi16, __m256i, __m256h, 8) -test_1 (_mm256_ipcvt_roundph_epu16, __m256i, __m256h, 8) -test_1 (_mm256_ipcvt_roundps_epi32, __m256i, __m256, 8) -test_1 (_mm256_ipcvt_roundps_epu32, __m256i, __m256, 8) -test_1 (_mm256_ipcvtt_roundph_epi16, __m256i, __m256h, 8) -test_1 (_mm256_ipcvtt_roundph_epu16, __m256i, __m256h, 8) -test_1 (_mm256_ipcvtt_roundps_epi32, __m256i, __m256, 8) -test_1 (_mm256_ipcvtt_roundps_epu32, __m256i, __m256, 8) -test_2 (_mm256_maskz_ipcvt_roundph_epi16, __m256i, __mmask16, __m256h, 8) -test_2 (_mm256_maskz_ipcvt_roundph_epu16, __m256i, __mmask16, __m256h, 8) -test_2 (_mm256_maskz_ipcvt_roundps_epi32, __m256i, __mmask8, __m256, 8) -test_2 (_mm256_maskz_ipcvt_roundps_epu32, __m256i, __mmask8, __m256, 8) -test_2 (_mm256_maskz_ipcvtt_roundph_epi16, __m256i, __mmask16, __m256h, 8) -test_2 (_mm256_maskz_ipcvtt_roundph_epu16, __m256i, __mmask16, __m256h, 8) -test_2 (_mm256_maskz_ipcvtt_roundps_epi32, __m256i, __mmask8, __m256, 8) -test_2 (_mm256_maskz_ipcvtt_roundps_epu32, __m256i, __mmask8, __m256, 8) -test_3 (_mm256_mask_ipcvt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8) -test_3 (_mm256_mask_ipcvt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8) -test_3 (_mm256_mask_ipcvt_roundps_epi32, __m256i, __m256i, __mmask8, __m256, 8) -test_3 (_mm256_mask_ipcvt_roundps_epu32, __m256i, __m256i, __mmask8, __m256, 8) -test_3 (_mm256_mask_ipcvtt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8) -test_3 (_mm256_mask_ipcvtt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 
8) -test_3 (_mm256_mask_ipcvtt_roundps_epi32, __m256i, __m256i, __mmask8, __m256, 8) -test_3 (_mm256_mask_ipcvtt_roundps_epu32, __m256i, __m256i, __mmask8, __m256, 8) +test_1 (_mm256_ipcvt_roundph_epi8, __m256i, __m256h, 8) +test_1 (_mm256_ipcvt_roundph_epu8, __m256i, __m256h, 8) +test_1 (_mm256_ipcvt_roundps_epi8, __m256i, __m256, 8) +test_1 (_mm256_ipcvt_roundps_epu8, __m256i, __m256, 8) +test_1 (_mm256_ipcvtt_roundph_epi8, __m256i, __m256h, 8) +test_1 (_mm256_ipcvtt_roundph_epu8, __m256i, __m256h, 8) +test_1 (_mm256_ipcvtt_roundps_epi8, __m256i, __m256, 8) +test_1 (_mm256_ipcvtt_roundps_epu8, __m256i, __m256, 8) +test_2 (_mm256_maskz_ipcvt_roundph_epi8, __m256i, __mmask16, __m256h, 8) +test_2 (_mm256_maskz_ipcvt_roundph_epu8, __m256i, __mmask16, __m256h, 8) +test_2 (_mm256_maskz_ipcvt_roundps_epi8, __m256i, __mmask8, __m256, 8) +test_2 (_mm256_maskz_ipcvt_roundps_epu8, __m256i, __mmask8, __m256, 8) +test_2 (_mm256_maskz_ipcvtt_roundph_epi8, __m256i, __mmask16, __m256h, 8) +test_2 (_mm256_maskz_ipcvtt_roundph_epu8, __m256i, __mmask16, __m256h, 8) +test_2 (_mm256_maskz_ipcvtt_roundps_epi8, __m256i, __mmask8, __m256, 8) +test_2 (_mm256_maskz_ipcvtt_roundps_epu8, __m256i, __mmask8, __m256, 8) +test_3 (_mm256_mask_ipcvt_roundph_epi8, __m256i, __m256i, __mmask16, __m256h, 8) +test_3 (_mm256_mask_ipcvt_roundph_epu8, __m256i, __m256i, __mmask16, __m256h, 8) +test_3 (_mm256_mask_ipcvt_roundps_epi8, __m256i, __m256i, __mmask8, __m256, 8) +test_3 (_mm256_mask_ipcvt_roundps_epu8, __m256i, __m256i, __mmask8, __m256, 8) +test_3 (_mm256_mask_ipcvtt_roundph_epi8, __m256i, __m256i, __mmask16, __m256h, 8) +test_3 (_mm256_mask_ipcvtt_roundph_epu8, __m256i, __m256i, __mmask16, __m256h, 8) +test_3 (_mm256_mask_ipcvtt_roundps_epi8, __m256i, __m256i, __mmask8, __m256, 8) +test_3 (_mm256_mask_ipcvtt_roundps_epu8, __m256i, __m256i, __mmask8, __m256, 8) test_1 (_mm256_cvtts_roundpd_epi32, __m128i, __m256d, 8) test_2 (_mm256_maskz_cvtts_roundpd_epi32, __m128i, __mmask8, __m256d, 8) test_3 
(_mm256_mask_cvtts_roundpd_epi32, __m128i, __m128i, __mmask8, __m256d, 8) diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c index 4f22fee1858..5e7276e919c 100644 --- a/gcc/testsuite/gcc.target/i386/sse-22.c +++ b/gcc/testsuite/gcc.target/i386/sse-22.c @@ -1472,30 +1472,30 @@ test_3 (_mm256_mask_cmp_pbh_mask, __mmask16, __mmask16, __m256bh, __m256bh, 1) test_3 (_mm_mask_cmp_pbh_mask, __mmask8, __mmask8, __m128bh, __m128bh, 1) /* avx10_2-512satcvtintrin.h */ -test_1 (_mm512_ipcvt_roundph_epi16, __m512i, __m512h, 8) -test_1 (_mm512_ipcvt_roundph_epu16, __m512i, __m512h, 8) -test_1 (_mm512_ipcvt_roundps_epi32, __m512i, __m512, 8) -test_1 (_mm512_ipcvt_roundps_epu32, __m512i, __m512, 8) -test_1 (_mm512_ipcvtt_roundph_epi16, __m512i, __m512h, 8) -test_1 (_mm512_ipcvtt_roundph_epu16, __m512i, __m512h, 8) -test_1 (_mm512_ipcvtt_roundps_epi32, __m512i, __m512, 8) -test_1 (_mm512_ipcvtt_roundps_epu32, __m512i, __m512, 8) -test_2 (_mm512_maskz_ipcvt_roundph_epi16, __m512i, __mmask32, __m512h, 8) -test_2 (_mm512_maskz_ipcvt_roundph_epu16, __m512i, __mmask32, __m512h, 8) -test_2 (_mm512_maskz_ipcvt_roundps_epi32, __m512i, __mmask16, __m512, 8) -test_2 (_mm512_maskz_ipcvt_roundps_epu32, __m512i, __mmask16, __m512, 8) -test_2 (_mm512_maskz_ipcvtt_roundph_epi16, __m512i, __mmask32, __m512h, 8) -test_2 (_mm512_maskz_ipcvtt_roundph_epu16, __m512i, __mmask32, __m512h, 8) -test_2 (_mm512_maskz_ipcvtt_roundps_epi32, __m512i, __mmask16, __m512, 8) -test_2 (_mm512_maskz_ipcvtt_roundps_epu32, __m512i, __mmask16, __m512, 8) -test_3 (_mm512_mask_ipcvt_roundph_epi16, __m512i, __m512i, __mmask32, __m512h, 8) -test_3 (_mm512_mask_ipcvt_roundph_epu16, __m512i, __m512i, __mmask32, __m512h, 8) -test_3 (_mm512_mask_ipcvt_roundps_epi32, __m512i, __m512i, __mmask16, __m512, 8) -test_3 (_mm512_mask_ipcvt_roundps_epu32, __m512i, __m512i, __mmask16, __m512, 8) -test_3 (_mm512_mask_ipcvtt_roundph_epi16, __m512i, __m512i, __mmask32, __m512h, 8) -test_3 
(_mm512_mask_ipcvtt_roundph_epu16, __m512i, __m512i, __mmask32, __m512h, 8) -test_3 (_mm512_mask_ipcvtt_roundps_epi32, __m512i, __m512i, __mmask16, __m512, 8) -test_3 (_mm512_mask_ipcvtt_roundps_epu32, __m512i, __m512i, __mmask16, __m512, 8) +test_1 (_mm512_ipcvt_roundph_epi8, __m512i, __m512h, 8) +test_1 (_mm512_ipcvt_roundph_epu8, __m512i, __m512h, 8) +test_1 (_mm512_ipcvt_roundps_epi8, __m512i, __m512, 8) +test_1 (_mm512_ipcvt_roundps_epu8, __m512i, __m512, 8) +test_1 (_mm512_ipcvtt_roundph_epi8, __m512i, __m512h, 8) +test_1 (_mm512_ipcvtt_roundph_epu8, __m512i, __m512h, 8) +test_1 (_mm512_ipcvtt_roundps_epi8, __m512i, __m512, 8) +test_1 (_mm512_ipcvtt_roundps_epu8, __m512i, __m512, 8) +test_2 (_mm512_maskz_ipcvt_roundph_epi8, __m512i, __mmask32, __m512h, 8) +test_2 (_mm512_maskz_ipcvt_roundph_epu8, __m512i, __mmask32, __m512h, 8) +test_2 (_mm512_maskz_ipcvt_roundps_epi8, __m512i, __mmask16, __m512, 8) +test_2 (_mm512_maskz_ipcvt_roundps_epu8, __m512i, __mmask16, __m512, 8) +test_2 (_mm512_maskz_ipcvtt_roundph_epi8, __m512i, __mmask32, __m512h, 8) +test_2 (_mm512_maskz_ipcvtt_roundph_epu8, __m512i, __mmask32, __m512h, 8) +test_2 (_mm512_maskz_ipcvtt_roundps_epi8, __m512i, __mmask16, __m512, 8) +test_2 (_mm512_maskz_ipcvtt_roundps_epu8, __m512i, __mmask16, __m512, 8) +test_3 (_mm512_mask_ipcvt_roundph_epi8, __m512i, __m512i, __mmask32, __m512h, 8) +test_3 (_mm512_mask_ipcvt_roundph_epu8, __m512i, __m512i, __mmask32, __m512h, 8) +test_3 (_mm512_mask_ipcvt_roundps_epi8, __m512i, __m512i, __mmask16, __m512, 8) +test_3 (_mm512_mask_ipcvt_roundps_epu8, __m512i, __m512i, __mmask16, __m512, 8) +test_3 (_mm512_mask_ipcvtt_roundph_epi8, __m512i, __m512i, __mmask32, __m512h, 8) +test_3 (_mm512_mask_ipcvtt_roundph_epu8, __m512i, __m512i, __mmask32, __m512h, 8) +test_3 (_mm512_mask_ipcvtt_roundps_epi8, __m512i, __m512i, __mmask16, __m512, 8) +test_3 (_mm512_mask_ipcvtt_roundps_epu8, __m512i, __m512i, __mmask16, __m512, 8) test_1 (_mm512_cvtts_roundpd_epi32, __m256i, __m512d, 
8) test_2 (_mm512_maskz_cvtts_roundpd_epi32, __m256i, __mmask8, __m512d, 8) test_3 (_mm512_mask_cvtts_roundpd_epi32, __m256i, __m256i, __mmask8, __m512d, 8) @@ -1522,30 +1522,30 @@ test_2 (_mm512_maskz_cvtts_roundps_epu64, __m512i, __mmask8, __m256, 8) test_3 (_mm512_mask_cvtts_roundps_epu64, __m512i, __m512i, __mmask8, __m256, 8) /* avx10_2satcvtintrin.h */ -test_1 (_mm256_ipcvt_roundph_epi16, __m256i, __m256h, 8) -test_1 (_mm256_ipcvt_roundph_epu16, __m256i, __m256h, 8) -test_1 (_mm256_ipcvt_roundps_epi32, __m256i, __m256, 8) -test_1 (_mm256_ipcvt_roundps_epu32, __m256i, __m256, 8) -test_1 (_mm256_ipcvtt_roundph_epi16, __m256i, __m256h, 8) -test_1 (_mm256_ipcvtt_roundph_epu16, __m256i, __m256h, 8) -test_1 (_mm256_ipcvtt_roundps_epi32, __m256i, __m256, 8) -test_1 (_mm256_ipcvtt_roundps_epu32, __m256i, __m256, 8) -test_2 (_mm256_maskz_ipcvt_roundph_epi16, __m256i, __mmask16, __m256h, 8) -test_2 (_mm256_maskz_ipcvt_roundph_epu16, __m256i, __mmask16, __m256h, 8) -test_2 (_mm256_maskz_ipcvt_roundps_epi32, __m256i, __mmask8, __m256, 8) -test_2 (_mm256_maskz_ipcvt_roundps_epu32, __m256i, __mmask8, __m256, 8) -test_2 (_mm256_maskz_ipcvtt_roundph_epi16, __m256i, __mmask16, __m256h, 8) -test_2 (_mm256_maskz_ipcvtt_roundph_epu16, __m256i, __mmask16, __m256h, 8) -test_2 (_mm256_maskz_ipcvtt_roundps_epi32, __m256i, __mmask8, __m256, 8) -test_2 (_mm256_maskz_ipcvtt_roundps_epu32, __m256i, __mmask8, __m256, 8) -test_3 (_mm256_mask_ipcvt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8) -test_3 (_mm256_mask_ipcvt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8) -test_3 (_mm256_mask_ipcvt_roundps_epi32, __m256i, __m256i, __mmask8, __m256, 8) -test_3 (_mm256_mask_ipcvt_roundps_epu32, __m256i, __m256i, __mmask8, __m256, 8) -test_3 (_mm256_mask_ipcvtt_roundph_epi16, __m256i, __m256i, __mmask16, __m256h, 8) -test_3 (_mm256_mask_ipcvtt_roundph_epu16, __m256i, __m256i, __mmask16, __m256h, 8) -test_3 (_mm256_mask_ipcvtt_roundps_epi32, __m256i, __m256i, __mmask8, __m256, 8) 
-test_3 (_mm256_mask_ipcvtt_roundps_epu32, __m256i, __m256i, __mmask8, __m256, 8) +test_1 (_mm256_ipcvt_roundph_epi8, __m256i, __m256h, 8) +test_1 (_mm256_ipcvt_roundph_epu8, __m256i, __m256h, 8) +test_1 (_mm256_ipcvt_roundps_epi8, __m256i, __m256, 8) +test_1 (_mm256_ipcvt_roundps_epu8, __m256i, __m256, 8) +test_1 (_mm256_ipcvtt_roundph_epi8, __m256i, __m256h, 8) +test_1 (_mm256_ipcvtt_roundph_epu8, __m256i, __m256h, 8) +test_1 (_mm256_ipcvtt_roundps_epi8, __m256i, __m256, 8) +test_1 (_mm256_ipcvtt_roundps_epu8, __m256i, __m256, 8) +test_2 (_mm256_maskz_ipcvt_roundph_epi8, __m256i, __mmask16, __m256h, 8) +test_2 (_mm256_maskz_ipcvt_roundph_epu8, __m256i, __mmask16, __m256h, 8) +test_2 (_mm256_maskz_ipcvt_roundps_epi8, __m256i, __mmask8, __m256, 8) +test_2 (_mm256_maskz_ipcvt_roundps_epu8, __m256i, __mmask8, __m256, 8) +test_2 (_mm256_maskz_ipcvtt_roundph_epi8, __m256i, __mmask16, __m256h, 8) +test_2 (_mm256_maskz_ipcvtt_roundph_epu8, __m256i, __mmask16, __m256h, 8) +test_2 (_mm256_maskz_ipcvtt_roundps_epi8, __m256i, __mmask8, __m256, 8) +test_2 (_mm256_maskz_ipcvtt_roundps_epu8, __m256i, __mmask8, __m256, 8) +test_3 (_mm256_mask_ipcvt_roundph_epi8, __m256i, __m256i, __mmask16, __m256h, 8) +test_3 (_mm256_mask_ipcvt_roundph_epu8, __m256i, __m256i, __mmask16, __m256h, 8) +test_3 (_mm256_mask_ipcvt_roundps_epi8, __m256i, __m256i, __mmask8, __m256, 8) +test_3 (_mm256_mask_ipcvt_roundps_epu8, __m256i, __m256i, __mmask8, __m256, 8) +test_3 (_mm256_mask_ipcvtt_roundph_epi8, __m256i, __m256i, __mmask16, __m256h, 8) +test_3 (_mm256_mask_ipcvtt_roundph_epu8, __m256i, __m256i, __mmask16, __m256h, 8) +test_3 (_mm256_mask_ipcvtt_roundps_epi8, __m256i, __m256i, __mmask8, __m256, 8) +test_3 (_mm256_mask_ipcvtt_roundps_epu8, __m256i, __m256i, __mmask8, __m256, 8) test_1 (_mm256_cvtts_roundpd_epi32, __m128i, __m256d, 8) test_2 (_mm256_maskz_cvtts_roundpd_epi32, __m128i, __mmask8, __m256d, 8) test_3 (_mm256_mask_cvtts_roundpd_epi32, __m128i, __m128i, __mmask8, __m256d, 8) -- 2.31.1