> -----Original Message-----
> From: Hu, Lin1 <lin1...@intel.com>
> Sent: Tuesday, March 25, 2025 4:23 PM
> To: gcc-patches@gcc.gnu.org
> Cc: Liu, Hongtao <hongtao....@intel.com>; ubiz...@gmail.com
> Subject: RE: [PATCH v2] i386: Add "s_" as Saturation for AVX10.2 Converting
> Intrinsics.
>
> More details: Alignment with llvm (https://github.com/llvm/llvm-
> project/pull/131592)
>
> BRs,
> Lin
>
> > -----Original Message-----
> > From: Hu, Lin1 <lin1...@intel.com>
> > Sent: Tuesday, March 25, 2025 4:10 PM
> > To: gcc-patches@gcc.gnu.org
> > Cc: Liu, Hongtao <hongtao....@intel.com>; ubiz...@gmail.com
> > Subject: [PATCH v2] i386: Add "s_" as Saturation for AVX10.2
> > Converting Intrinsics.
> >
> > Modify ChangeLog.
> >
> > This patch aims to add "s_" after 'cvt' represent saturation.
> >
> > gcc/ChangeLog:
> >
> > * config/i386/avx10_2-512convertintrin.h
> (_mm512_mask_cvtx2ps_ph):
> > Formatting fixes.
> > (_mm512_mask_cvtx_round2ps_ph): Ditto.
> > (_mm512_maskz_cvtx_round2ps_ph): Ditto.
> > (_mm512_cvtbiassph_bf8): Rename to _mm512_cvts_biasph_bf8.
> > (_mm512_mask_cvtbiassph_bf8): Rename to
> _mm512_mask_cvts_biasph_bf8.
> > (_mm512_maskz_cvtbiassph_bf8): Rename to
> > _mm512_maskz_cvts_biasph_bf8.
> > (_mm512_cvtbiassph_hf8): Rename to _mm512_cvts_biasph_hf8.
> > (_mm512_mask_cvtbiassph_hf8): Rename to
> _mm512_mask_cvts_biasph_hf8.
> > (_mm512_maskz_cvtbiassph_hf8): Rename to
> > _mm512_maskz_cvts_biasph_hf8.
> > (_mm512_cvts2ph_bf8): Rename to _mm512_cvts_2ph_bf8.
> > (_mm512_mask_cvts2ph_bf8): Rename to
> > _mm512_mask_cvts_2ph_bf8.
> > (_mm512_maskz_cvts2ph_bf8): Rename to
> _mm512_maskz_cvts_2ph_bf8.
> > (_mm512_cvts2ph_hf8): Rename to _mm512_cvts_2ph_hf8.
> > (_mm512_mask_cvts2ph_hf8): Rename to
> > _mm512_mask_cvts_2ph_hf8.
> > (_mm512_maskz_cvts2ph_hf8): Rename to
> _mm512_maskz_cvts_2ph_hf8.
> > (_mm512_cvtsph_bf8): Rename to _mm512_cvts_ph_bf8.
> > (_mm512_mask_cvtsph_bf8): Rename to
> _mm512_mask_cvts_ph_bf8.
> > (_mm512_maskz_cvtsph_bf8): Rename to
> _mm512_maskz_cvts_ph_bf8.
> > (_mm512_cvtsph_hf8): Rename to _mm512_cvts_ph_hf8.
> > (_mm512_mask_cvtsph_hf8): Rename to
> _mm512_mask_cvts_ph_hf8.
> > (_mm512_maskz_cvtsph_hf8): Rename to
> _mm512_maskz_cvts_ph_hf8.
> > * config/i386/avx10_2convertintrin.h
> > (_mm_cvtbiassph_bf8): Rename to _mm_cvts_biasph_bf8.
> > (_mm_mask_cvtbiassph_bf8): Rename to
> _mm_mask_cvts_biasph_bf8.
> > (_mm_maskz_cvtbiassph_bf8): Rename to
> _mm_maskz_cvts_biasph_bf8.
> > (_mm256_cvtbiassph_bf8): Rename to _mm256_cvts_biasph_bf8.
> > (_mm256_mask_cvtbiassph_bf8): Rename to
> _mm256_mask_cvts_biasph_bf8.
> > (_mm256_maskz_cvtbiassph_bf8): Rename to
> > _mm256_maskz_cvts_biasph_bf8.
> > (_mm_cvtbiassph_hf8): Rename to _mm_cvts_biasph_hf8.
> > (_mm_mask_cvtbiassph_hf8): Rename to
> _mm_mask_cvts_biasph_hf8.
> > (_mm_maskz_cvtbiassph_hf8): Rename to
> _mm_maskz_cvts_biasph_hf8.
> > (_mm256_cvtbiassph_hf8): Rename to _mm256_cvts_biasph_hf8.
> > (_mm256_mask_cvtbiassph_hf8): Rename to
> _mm256_mask_cvts_biasph_hf8.
> > (_mm256_maskz_cvtbiassph_hf8): Rename to
> > _mm256_maskz_cvts_biasph_hf8.
> > (_mm_cvts2ph_bf8): Rename to _mm_cvts_2ph_bf8.
> > (_mm_mask_cvts2ph_bf8): Rename to _mm_mask_cvts_2ph_bf8.
> > (_mm_maskz_cvts2ph_bf8): Rename to _mm_maskz_cvts_2ph_bf8.
> > (_mm256_cvts2ph_bf8): Rename to _mm256_cvts_2ph_bf8.
> > (_mm256_mask_cvts2ph_bf8): Rename to
> > _mm256_mask_cvts_2ph_bf8.
> > (_mm256_maskz_cvts2ph_bf8): Rename to
> _mm256_maskz_cvts_2ph_bf8.
> > (_mm_cvts2ph_hf8): Rename to _mm_cvts_2ph_hf8.
> > (_mm_mask_cvts2ph_hf8): Rename to _mm_mask_cvts_2ph_hf8.
> > (_mm_maskz_cvts2ph_hf8): Rename to _mm_maskz_cvts_2ph_hf8.
> > (_mm256_cvts2ph_hf8): Rename to _mm256_cvts_2ph_hf8.
> > (_mm256_mask_cvts2ph_hf8): Rename to
> > _mm256_mask_cvts_2ph_hf8.
> > (_mm256_maskz_cvts2ph_hf8): Rename to
> _mm256_maskz_cvts_2ph_hf8.
> > (_mm_cvtsph_bf8): Rename to _mm_cvts_ph_bf8.
> > (_mm_mask_cvtsph_bf8): Rename to _mm_mask_cvts_ph_bf8.
> > (_mm_maskz_cvtsph_bf8): Rename to _mm_maskz_cvts_ph_bf8.
> > (_mm256_cvtsph_bf8): Rename to _mm256_cvts_ph_bf8.
> > (_mm256_mask_cvtsph_bf8): Rename to
> _mm256_mask_cvts_ph_bf8.
> > (_mm256_maskz_cvtsph_bf8): Rename to
> _mm256_maskz_cvts_ph_bf8.
> > (_mm_cvtsph_hf8): Rename to _mm_cvts_ph_hf8.
> > (_mm_mask_cvtsph_hf8): Rename to _mm_mask_cvts_ph_hf8.
> > (_mm_maskz_cvtsph_hf8): Rename to _mm_maskz_cvts_ph_hf8.
> > (_mm256_cvtsph_hf8): Rename to _mm256_cvts_ph_hf8.
> > (_mm256_mask_cvtsph_hf8): Rename to
> _mm256_mask_cvts_ph_hf8.
> > (_mm256_maskz_cvtsph_hf8): Rename to
> _mm256_maskz_cvts_ph_hf8.
OK. Thanks for addressing Jakub's comments.
> >
> > gcc/testsuite/ChangeLog:
> >
> > * gcc.target/i386/avx10_2-512-convert-1.c: Modify function name
> > to follow the latest version.
> > * gcc.target/i386/avx10_2-512-vcvt2ph2bf8s-2.c: Ditto.
> > * gcc.target/i386/avx10_2-512-vcvt2ph2hf8s-2.c: Ditto.
> > * gcc.target/i386/avx10_2-512-vcvtbiasph2bf8s-2.c: Ditto.
> > * gcc.target/i386/avx10_2-512-vcvtbiasph2hf8s-2.c: Ditto.
> > * gcc.target/i386/avx10_2-512-vcvtph2bf8s-2.c: Ditto.
> > * gcc.target/i386/avx10_2-512-vcvtph2hf8s-2.c: Ditto.
> > * gcc.target/i386/avx10_2-convert-1.c: Ditto.
> > ---
> > gcc/config/i386/avx10_2-512convertintrin.h | 50 +++++------
> > gcc/config/i386/avx10_2convertintrin.h | 88 +++++++++----------
> > .../gcc.target/i386/avx10_2-512-convert-1.c | 36 ++++----
> > .../i386/avx10_2-512-vcvt2ph2bf8s-2.c | 6 +-
> > .../i386/avx10_2-512-vcvt2ph2hf8s-2.c | 6 +-
> > .../i386/avx10_2-512-vcvtbiasph2bf8s-2.c | 6 +-
> > .../i386/avx10_2-512-vcvtbiasph2hf8s-2.c | 6 +-
> > .../i386/avx10_2-512-vcvtph2bf8s-2.c | 6 +-
> > .../i386/avx10_2-512-vcvtph2hf8s-2.c | 6 +-
> > .../gcc.target/i386/avx10_2-convert-1.c | 72 +++++++--------
> > 10 files changed, 141 insertions(+), 141 deletions(-)
> >
> > diff --git a/gcc/config/i386/avx10_2-512convertintrin.h
> > b/gcc/config/i386/avx10_2-512convertintrin.h
> > index 8007cf36d76..611a40d83e2 100644
> > --- a/gcc/config/i386/avx10_2-512convertintrin.h
> > +++ b/gcc/config/i386/avx10_2-512convertintrin.h
> > @@ -49,7 +49,7 @@ _mm512_cvtx2ps_ph (__m512 __A, __m512 __B)
> extern
> > __inline __m512h __attribute__ ((__gnu_inline__, __always_inline__,
> > __artificial__)) _mm512_mask_cvtx2ps_ph (__m512h __W, __mmask32
> __U,
> > __m512 __A,
> > - __m512 __B)
> > + __m512 __B)
> > {
> > return (__m512h) __builtin_ia32_vcvt2ps2phx512_mask_round
> ((__v16sf) __A,
> > (__v16sf) __B,
> > @@ -86,7 +86,7 @@ _mm512_cvtx_round2ps_ph (__m512 __A, __m512
> __B,
> > const int __R) extern __inline __m512h __attribute__
> > ((__gnu_inline__, __always_inline__, __artificial__))
> > _mm512_mask_cvtx_round2ps_ph (__m512h __W, __mmask32 __U,
> __m512 __A,
> > - __m512 __B, const int __R)
> > + __m512 __B, const int __R)
> > {
> > return (__m512h) __builtin_ia32_vcvt2ps2phx512_mask_round
> ((__v16sf) __A,
> > (__v16sf) __B,
> > @@ -98,7 +98,7 @@ _mm512_mask_cvtx_round2ps_ph (__m512h __W,
> > __mmask32 __U, __m512 __A, extern __inline __m512h __attribute__
> > ((__gnu_inline__, __always_inline__, __artificial__))
> > _mm512_maskz_cvtx_round2ps_ph (__mmask32 __U, __m512 __A,
> > - __m512 __B, const int __R)
> > + __m512 __B, const int __R)
> > {
> > return (__m512h) __builtin_ia32_vcvt2ps2phx512_mask_round
> ((__v16sf) __A,
> > (__v16sf) __B,
> > @@ -166,7 +166,7 @@ _mm512_maskz_cvtbiasph_bf8 (__mmask32 __U,
> __m512i
> > __A, __m512h __B)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_cvtbiassph_bf8 (__m512i __A, __m512h __B)
> > +_mm512_cvts_biasph_bf8 (__m512i __A, __m512h __B)
> > {
> > return (__m256i) __builtin_ia32_vcvtbiasph2bf8s512_mask ((__v64qi)
> __A,
> > (__v32hf) __B,
> > @@ -177,8 +177,8 @@ _mm512_cvtbiassph_bf8 (__m512i __A, __m512h
> __B)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_mask_cvtbiassph_bf8 (__m256i __W, __mmask32 __U,
> > - __m512i __A, __m512h __B)
> > +_mm512_mask_cvts_biasph_bf8 (__m256i __W, __mmask32 __U,
> > + __m512i __A, __m512h __B)
> > {
> > return (__m256i) __builtin_ia32_vcvtbiasph2bf8s512_mask ((__v64qi)
> __A,
> > (__v32hf) __B,
> > @@ -188,7 +188,7 @@ _mm512_mask_cvtbiassph_bf8 (__m256i __W,
> > __mmask32 __U,
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_maskz_cvtbiassph_bf8 (__mmask32 __U, __m512i __A,
> __m512h __B)
> > +_mm512_maskz_cvts_biasph_bf8 (__mmask32 __U, __m512i __A,
> __m512h
> > __B)
> > {
> > return (__m256i) __builtin_ia32_vcvtbiasph2bf8s512_mask ((__v64qi)
> __A,
> > (__v32hf) __B,
> > @@ -232,7 +232,7 @@ _mm512_maskz_cvtbiasph_hf8 (__mmask32 __U,
> __m512i
> > __A, __m512h __B)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_cvtbiassph_hf8 (__m512i __A, __m512h __B)
> > +_mm512_cvts_biasph_hf8 (__m512i __A, __m512h __B)
> > {
> > return (__m256i) __builtin_ia32_vcvtbiasph2hf8s512_mask ((__v64qi)
> __A,
> > (__v32hf) __B,
> > @@ -243,8 +243,8 @@ _mm512_cvtbiassph_hf8 (__m512i __A, __m512h
> __B)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_mask_cvtbiassph_hf8 (__m256i __W, __mmask32 __U,
> > - __m512i __A, __m512h __B)
> > +_mm512_mask_cvts_biasph_hf8 (__m256i __W, __mmask32 __U,
> > + __m512i __A, __m512h __B)
> > {
> > return (__m256i) __builtin_ia32_vcvtbiasph2hf8s512_mask ((__v64qi)
> __A,
> > (__v32hf) __B,
> > @@ -254,7 +254,7 @@ _mm512_mask_cvtbiassph_hf8 (__m256i __W,
> > __mmask32 __U,
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_maskz_cvtbiassph_hf8 (__mmask32 __U, __m512i __A,
> __m512h __B)
> > +_mm512_maskz_cvts_biasph_hf8 (__mmask32 __U, __m512i __A,
> __m512h
> > __B)
> > {
> > return (__m256i) __builtin_ia32_vcvtbiasph2hf8s512_mask ((__v64qi)
> __A,
> > (__v32hf) __B,
> > @@ -298,7 +298,7 @@ _mm512_maskz_cvt2ph_bf8 (__mmask64 __U,
> __m512h
> > __A, __m512h __B)
> >
> > extern __inline__ __m512i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_cvts2ph_bf8 (__m512h __A, __m512h __B)
> > +_mm512_cvts_2ph_bf8 (__m512h __A, __m512h __B)
> > {
> > return (__m512i) __builtin_ia32_vcvt2ph2bf8s512_mask ((__v32hf) __A,
> > (__v32hf) __B,
> > @@ -309,8 +309,8 @@ _mm512_cvts2ph_bf8 (__m512h __A, __m512h
> __B)
> >
> > extern __inline__ __m512i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_mask_cvts2ph_bf8 (__m512i __W, __mmask64 __U,
> > - __m512h __A, __m512h __B)
> > +_mm512_mask_cvts_2ph_bf8 (__m512i __W, __mmask64 __U,
> > + __m512h __A, __m512h __B)
> > {
> > return (__m512i) __builtin_ia32_vcvt2ph2bf8s512_mask ((__v32hf) __A,
> > (__v32hf) __B,
> > @@ -320,7 +320,7 @@ _mm512_mask_cvts2ph_bf8 (__m512i __W,
> > __mmask64 __U,
> >
> > extern __inline__ __m512i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_maskz_cvts2ph_bf8 (__mmask64 __U, __m512h __A, __m512h
> __B)
> > +_mm512_maskz_cvts_2ph_bf8 (__mmask64 __U, __m512h __A,
> __m512h __B)
> > {
> > return (__m512i) __builtin_ia32_vcvt2ph2bf8s512_mask ((__v32hf) __A,
> > (__v32hf) __B,
> > @@ -364,7 +364,7 @@ _mm512_maskz_cvt2ph_hf8 (__mmask64 __U,
> __m512h
> > __A, __m512h __B)
> >
> > extern __inline__ __m512i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_cvts2ph_hf8 (__m512h __A, __m512h __B)
> > +_mm512_cvts_2ph_hf8 (__m512h __A, __m512h __B)
> > {
> > return (__m512i) __builtin_ia32_vcvt2ph2hf8s512_mask ((__v32hf) __A,
> > (__v32hf) __B,
> > @@ -375,8 +375,8 @@ _mm512_cvts2ph_hf8 (__m512h __A, __m512h
> __B)
> >
> > extern __inline__ __m512i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_mask_cvts2ph_hf8 (__m512i __W, __mmask64 __U,
> > - __m512h __A, __m512h __B)
> > +_mm512_mask_cvts_2ph_hf8 (__m512i __W, __mmask64 __U,
> > + __m512h __A, __m512h __B)
> > {
> > return (__m512i) __builtin_ia32_vcvt2ph2hf8s512_mask ((__v32hf) __A,
> > (__v32hf) __B,
> > @@ -386,7 +386,7 @@ _mm512_mask_cvts2ph_hf8 (__m512i __W,
> > __mmask64 __U,
> >
> > extern __inline__ __m512i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_maskz_cvts2ph_hf8 (__mmask64 __U, __m512h __A, __m512h
> __B)
> > +_mm512_maskz_cvts_2ph_hf8 (__mmask64 __U, __m512h __A,
> __m512h __B)
> > {
> > return (__m512i) __builtin_ia32_vcvt2ph2hf8s512_mask ((__v32hf) __A,
> > (__v32hf) __B,
> > @@ -455,7 +455,7 @@ _mm512_maskz_cvtph_bf8 (__mmask32 __U,
> __m512h
> > __A)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_cvtsph_bf8 (__m512h __A)
> > +_mm512_cvts_ph_bf8 (__m512h __A)
> > {
> > return (__m256i) __builtin_ia32_vcvtph2bf8s512_mask ((__v32hf) __A,
> > (__v32qi) (__m256i)
> > @@ -465,7 +465,7 @@ _mm512_cvtsph_bf8 (__m512h __A)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_mask_cvtsph_bf8 (__m256i __W, __mmask32 __U, __m512h
> __A)
> > +_mm512_mask_cvts_ph_bf8 (__m256i __W, __mmask32 __U, __m512h
> __A)
> > {
> > return (__m256i) __builtin_ia32_vcvtph2bf8s512_mask ((__v32hf) __A,
> > (__v32qi) (__m256i) __W,
> @@ -474,7 +474,7 @@
> > _mm512_mask_cvtsph_bf8 (__m256i __W, __mmask32 __U, __m512h
> __A)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_maskz_cvtsph_bf8 (__mmask32 __U, __m512h __A)
> > +_mm512_maskz_cvts_ph_bf8 (__mmask32 __U, __m512h __A)
> > {
> > return (__m256i) __builtin_ia32_vcvtph2bf8s512_mask ((__v32hf) __A,
> > (__v32qi) (__m256i)
> > @@ -513,7 +513,7 @@ _mm512_maskz_cvtph_hf8 (__mmask32 __U,
> __m512h
> > __A)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_cvtsph_hf8 (__m512h __A)
> > +_mm512_cvts_ph_hf8 (__m512h __A)
> > {
> > return (__m256i) __builtin_ia32_vcvtph2hf8s512_mask ((__v32hf) __A,
> > (__v32qi) (__m256i)
> > @@ -523,7 +523,7 @@ _mm512_cvtsph_hf8 (__m512h __A)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_mask_cvtsph_hf8 (__m256i __W, __mmask32 __U, __m512h
> __A)
> > +_mm512_mask_cvts_ph_hf8 (__m256i __W, __mmask32 __U, __m512h
> __A)
> > {
> > return (__m256i) __builtin_ia32_vcvtph2hf8s512_mask ((__v32hf) __A,
> > (__v32qi) (__m256i) __W,
> @@ -532,7 +532,7 @@
> > _mm512_mask_cvtsph_hf8 (__m256i __W, __mmask32 __U, __m512h
> __A)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm512_maskz_cvtsph_hf8 (__mmask32 __U, __m512h __A)
> > +_mm512_maskz_cvts_ph_hf8 (__mmask32 __U, __m512h __A)
> > {
> > return (__m256i) __builtin_ia32_vcvtph2hf8s512_mask ((__v32hf) __A,
> > (__v32qi) (__m256i)
> > diff --git a/gcc/config/i386/avx10_2convertintrin.h
> > b/gcc/config/i386/avx10_2convertintrin.h
> > index 6144723bb6b..8cbdc667159 100644
> > --- a/gcc/config/i386/avx10_2convertintrin.h
> > +++ b/gcc/config/i386/avx10_2convertintrin.h
> > @@ -163,7 +163,7 @@ _mm256_maskz_cvtbiasph_bf8 (__mmask16 __U,
> __m256i
> > __A, __m256h __B)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_cvtbiassph_bf8 (__m128i __A, __m128h __B)
> > +_mm_cvts_biasph_bf8 (__m128i __A, __m128h __B)
> > {
> > return (__m128i) __builtin_ia32_vcvtbiasph2bf8s128 ((__v16qi) __A,
> > (__v8hf) __B);
> > @@ -171,8 +171,8 @@ _mm_cvtbiassph_bf8 (__m128i __A, __m128h __B)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_mask_cvtbiassph_bf8 (__m128i __W, __mmask8 __U,
> > - __m128i __A, __m128h __B)
> > +_mm_mask_cvts_biasph_bf8 (__m128i __W, __mmask8 __U,
> > + __m128i __A, __m128h __B)
> > {
> > return (__m128i) __builtin_ia32_vcvtbiasph2bf8s128_mask ((__v16qi)
> __A,
> > (__v8hf) __B,
> > @@ -182,7 +182,7 @@ _mm_mask_cvtbiassph_bf8 (__m128i __W,
> __mmask8
> > __U,
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_maskz_cvtbiassph_bf8 (__mmask8 __U, __m128i __A, __m128h
> __B)
> > +_mm_maskz_cvts_biasph_bf8 (__mmask8 __U, __m128i __A, __m128h
> __B)
> > {
> > return (__m128i) __builtin_ia32_vcvtbiasph2bf8s128_mask ((__v16qi)
> __A,
> > (__v8hf) __B,
> > @@ -193,7 +193,7 @@ _mm_maskz_cvtbiassph_bf8 (__mmask8 __U,
> __m128i
> > __A, __m128h __B)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_cvtbiassph_bf8 (__m256i __A, __m256h __B)
> > +_mm256_cvts_biasph_bf8 (__m256i __A, __m256h __B)
> > {
> > return (__m128i) __builtin_ia32_vcvtbiasph2bf8s256_mask ((__v32qi)
> __A,
> > (__v16hf) __B,
> > @@ -204,8 +204,8 @@ _mm256_cvtbiassph_bf8 (__m256i __A, __m256h
> __B)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_mask_cvtbiassph_bf8 (__m128i __W, __mmask16 __U,
> > - __m256i __A, __m256h __B)
> > +_mm256_mask_cvts_biasph_bf8 (__m128i __W, __mmask16 __U,
> > + __m256i __A, __m256h __B)
> > {
> > return (__m128i) __builtin_ia32_vcvtbiasph2bf8s256_mask ((__v32qi)
> __A,
> > (__v16hf) __B,
> > @@ -215,7 +215,7 @@ _mm256_mask_cvtbiassph_bf8 (__m128i __W,
> > __mmask16 __U,
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_maskz_cvtbiassph_bf8 (__mmask16 __U, __m256i __A,
> __m256h __B)
> > +_mm256_maskz_cvts_biasph_bf8 (__mmask16 __U, __m256i __A,
> __m256h
> > __B)
> > {
> > return (__m128i) __builtin_ia32_vcvtbiasph2bf8s256_mask ((__v32qi)
> __A,
> > (__v16hf) __B,
> > @@ -289,7 +289,7 @@ _mm256_maskz_cvtbiasph_hf8 (__mmask16 __U,
> __m256i
> > __A, __m256h __B)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_cvtbiassph_hf8 (__m128i __A, __m128h __B)
> > +_mm_cvts_biasph_hf8 (__m128i __A, __m128h __B)
> > {
> > return (__m128i) __builtin_ia32_vcvtbiasph2hf8s128 ((__v16qi) __A,
> > (__v8hf) __B);
> > @@ -297,8 +297,8 @@ _mm_cvtbiassph_hf8 (__m128i __A, __m128h __B)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_mask_cvtbiassph_hf8 (__m128i __W, __mmask8 __U,
> > - __m128i __A, __m128h __B)
> > +_mm_mask_cvts_biasph_hf8 (__m128i __W, __mmask8 __U,
> > + __m128i __A, __m128h __B)
> > {
> > return (__m128i) __builtin_ia32_vcvtbiasph2hf8s128_mask ((__v16qi)
> __A,
> > (__v8hf) __B,
> > @@ -308,7 +308,7 @@ _mm_mask_cvtbiassph_hf8 (__m128i __W,
> __mmask8
> > __U,
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_maskz_cvtbiassph_hf8 (__mmask8 __U, __m128i __A, __m128h
> __B)
> > +_mm_maskz_cvts_biasph_hf8 (__mmask8 __U, __m128i __A, __m128h
> __B)
> > {
> > return (__m128i) __builtin_ia32_vcvtbiasph2hf8s128_mask ((__v16qi)
> __A,
> > (__v8hf) __B,
> > @@ -319,7 +319,7 @@ _mm_maskz_cvtbiassph_hf8 (__mmask8 __U,
> __m128i
> > __A, __m128h __B)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_cvtbiassph_hf8 (__m256i __A, __m256h __B)
> > +_mm256_cvts_biasph_hf8 (__m256i __A, __m256h __B)
> > {
> > return (__m128i) __builtin_ia32_vcvtbiasph2hf8s256_mask ((__v32qi)
> __A,
> > (__v16hf) __B,
> > @@ -330,8 +330,8 @@ _mm256_cvtbiassph_hf8 (__m256i __A, __m256h
> __B)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_mask_cvtbiassph_hf8 (__m128i __W, __mmask16 __U,
> > - __m256i __A, __m256h __B)
> > +_mm256_mask_cvts_biasph_hf8 (__m128i __W, __mmask16 __U,
> > + __m256i __A, __m256h __B)
> > {
> > return (__m128i) __builtin_ia32_vcvtbiasph2hf8s256_mask ((__v32qi)
> __A,
> > (__v16hf) __B,
> > @@ -341,7 +341,7 @@ _mm256_mask_cvtbiassph_hf8 (__m128i __W,
> > __mmask16 __U,
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_maskz_cvtbiassph_hf8 (__mmask16 __U, __m256i __A,
> __m256h __B)
> > +_mm256_maskz_cvts_biasph_hf8 (__mmask16 __U, __m256i __A,
> __m256h
> > __B)
> > {
> > return (__m128i) __builtin_ia32_vcvtbiasph2hf8s256_mask ((__v32qi)
> __A,
> > (__v16hf) __B,
> > @@ -418,7 +418,7 @@ _mm256_maskz_cvt2ph_bf8 (__mmask32 __U,
> __m256h
> > __A, __m256h __B)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_cvts2ph_bf8 (__m128h __A, __m128h __B)
> > +_mm_cvts_2ph_bf8 (__m128h __A, __m128h __B)
> > {
> > return (__m128i) __builtin_ia32_vcvt2ph2bf8s128_mask ((__v8hf) __A,
> > (__v8hf) __B,
> > @@ -429,8 +429,8 @@ _mm_cvts2ph_bf8 (__m128h __A, __m128h __B)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_mask_cvts2ph_bf8 (__m128i __W, __mmask16 __U,
> > - __m128h __A, __m128h __B)
> > +_mm_mask_cvts_2ph_bf8 (__m128i __W, __mmask16 __U,
> > + __m128h __A, __m128h __B)
> > {
> > return (__m128i) __builtin_ia32_vcvt2ph2bf8s128_mask ((__v8hf) __A,
> > (__v8hf) __B,
> > @@ -440,7 +440,7 @@ _mm_mask_cvts2ph_bf8 (__m128i __W,
> __mmask16 __U,
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_maskz_cvts2ph_bf8 (__mmask16 __U, __m128h __A, __m128h __B)
> > +_mm_maskz_cvts_2ph_bf8 (__mmask16 __U, __m128h __A, __m128h
> __B)
> > {
> > return (__m128i) __builtin_ia32_vcvt2ph2bf8s128_mask ((__v8hf) __A,
> > (__v8hf) __B,
> > @@ -451,7 +451,7 @@ _mm_maskz_cvts2ph_bf8 (__mmask16 __U,
> __m128h __A,
> > __m128h __B)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_cvts2ph_bf8 (__m256h __A, __m256h __B)
> > +_mm256_cvts_2ph_bf8 (__m256h __A, __m256h __B)
> > {
> > return (__m256i) __builtin_ia32_vcvt2ph2bf8s256_mask ((__v16hf) __A,
> > (__v16hf) __B,
> > @@ -462,8 +462,8 @@ _mm256_cvts2ph_bf8 (__m256h __A, __m256h
> __B)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_mask_cvts2ph_bf8 (__m256i __W, __mmask32 __U,
> > - __m256h __A, __m256h __B)
> > +_mm256_mask_cvts_2ph_bf8 (__m256i __W, __mmask32 __U,
> > + __m256h __A, __m256h __B)
> > {
> > return (__m256i) __builtin_ia32_vcvt2ph2bf8s256_mask ((__v16hf) __A,
> > (__v16hf) __B,
> > @@ -473,7 +473,7 @@ _mm256_mask_cvts2ph_bf8 (__m256i __W,
> > __mmask32 __U,
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_maskz_cvts2ph_bf8 (__mmask32 __U, __m256h __A, __m256h
> __B)
> > +_mm256_maskz_cvts_2ph_bf8 (__mmask32 __U, __m256h __A,
> __m256h __B)
> > {
> > return (__m256i) __builtin_ia32_vcvt2ph2bf8s256_mask ((__v16hf) __A,
> > (__v16hf) __B,
> > @@ -550,7 +550,7 @@ _mm256_maskz_cvt2ph_hf8 (__mmask32 __U,
> __m256h
> > __A, __m256h __B)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_cvts2ph_hf8 (__m128h __A, __m128h __B)
> > +_mm_cvts_2ph_hf8 (__m128h __A, __m128h __B)
> > {
> > return (__m128i) __builtin_ia32_vcvt2ph2hf8s128_mask ((__v8hf) __A,
> > (__v8hf) __B,
> > @@ -561,8 +561,8 @@ _mm_cvts2ph_hf8 (__m128h __A, __m128h __B)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_mask_cvts2ph_hf8 (__m128i __W, __mmask16 __U,
> > - __m128h __A, __m128h __B)
> > +_mm_mask_cvts_2ph_hf8 (__m128i __W, __mmask16 __U,
> > + __m128h __A, __m128h __B)
> > {
> > return (__m128i) __builtin_ia32_vcvt2ph2hf8s128_mask ((__v8hf) __A,
> > (__v8hf) __B,
> > @@ -572,7 +572,7 @@ _mm_mask_cvts2ph_hf8 (__m128i __W,
> __mmask16 __U,
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_maskz_cvts2ph_hf8 (__mmask16 __U, __m128h __A, __m128h __B)
> > +_mm_maskz_cvts_2ph_hf8 (__mmask16 __U, __m128h __A, __m128h
> __B)
> > {
> > return (__m128i) __builtin_ia32_vcvt2ph2hf8s128_mask ((__v8hf) __A,
> > (__v8hf) __B,
> > @@ -583,7 +583,7 @@ _mm_maskz_cvts2ph_hf8 (__mmask16 __U,
> __m128h __A,
> > __m128h __B)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_cvts2ph_hf8 (__m256h __A, __m256h __B)
> > +_mm256_cvts_2ph_hf8 (__m256h __A, __m256h __B)
> > {
> > return (__m256i) __builtin_ia32_vcvt2ph2hf8s256_mask ((__v16hf) __A,
> > (__v16hf) __B,
> > @@ -594,8 +594,8 @@ _mm256_cvts2ph_hf8 (__m256h __A, __m256h
> __B)
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_mask_cvts2ph_hf8 (__m256i __W, __mmask32 __U,
> > - __m256h __A, __m256h __B)
> > +_mm256_mask_cvts_2ph_hf8 (__m256i __W, __mmask32 __U,
> > + __m256h __A, __m256h __B)
> > {
> > return (__m256i) __builtin_ia32_vcvt2ph2hf8s256_mask ((__v16hf) __A,
> > (__v16hf) __B,
> > @@ -605,7 +605,7 @@ _mm256_mask_cvts2ph_hf8 (__m256i __W,
> > __mmask32 __U,
> >
> > extern __inline__ __m256i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_maskz_cvts2ph_hf8 (__mmask32 __U, __m256h __A, __m256h
> __B)
> > +_mm256_maskz_cvts_2ph_hf8 (__mmask32 __U, __m256h __A,
> __m256h __B)
> > {
> > return (__m256i) __builtin_ia32_vcvt2ph2hf8s256_mask ((__v16hf) __A,
> > (__v16hf) __B,
> > @@ -732,7 +732,7 @@ _mm256_maskz_cvtph_bf8 (__mmask16 __U,
> __m256h
> > __A)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_cvtsph_bf8 (__m128h __A)
> > +_mm_cvts_ph_bf8 (__m128h __A)
> > {
> > return (__m128i) __builtin_ia32_vcvtph2bf8s128_mask ((__v8hf) __A,
> > (__v16qi)(__m128i)
> > @@ -742,7 +742,7 @@ _mm_cvtsph_bf8 (__m128h __A)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_mask_cvtsph_bf8 (__m128i __W, __mmask8 __U, __m128h __A)
> > +_mm_mask_cvts_ph_bf8 (__m128i __W, __mmask8 __U, __m128h __A)
> > {
> > return (__m128i) __builtin_ia32_vcvtph2bf8s128_mask ((__v8hf) __A,
> > (__v16qi)(__m128i) __W,
> > @@ -751,7 +751,7 @@ _mm_mask_cvtsph_bf8 (__m128i __W, __mmask8
> __U,
> > __m128h __A)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_maskz_cvtsph_bf8 (__mmask8 __U, __m128h __A)
> > +_mm_maskz_cvts_ph_bf8 (__mmask8 __U, __m128h __A)
> > {
> > return (__m128i) __builtin_ia32_vcvtph2bf8s128_mask ((__v8hf) __A,
> > (__v16qi)(__m128i)
> > @@ -761,7 +761,7 @@ _mm_maskz_cvtsph_bf8 (__mmask8 __U,
> __m128h __A)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_cvtsph_bf8 (__m256h __A)
> > +_mm256_cvts_ph_bf8 (__m256h __A)
> > {
> > return (__m128i) __builtin_ia32_vcvtph2bf8s256_mask ((__v16hf) __A,
> > (__v16qi)(__m128i)
> > @@ -771,7 +771,7 @@ _mm256_cvtsph_bf8 (__m256h __A)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_mask_cvtsph_bf8 (__m128i __W, __mmask16 __U, __m256h
> __A)
> > +_mm256_mask_cvts_ph_bf8 (__m128i __W, __mmask16 __U, __m256h
> __A)
> > {
> > return (__m128i) __builtin_ia32_vcvtph2bf8s256_mask ((__v16hf) __A,
> > (__v16qi)(__m128i) __W,
> > @@ -780,7 +780,7 @@ _mm256_mask_cvtsph_bf8 (__m128i __W,
> __mmask16
> > __U, __m256h __A)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_maskz_cvtsph_bf8 (__mmask16 __U, __m256h __A)
> > +_mm256_maskz_cvts_ph_bf8 (__mmask16 __U, __m256h __A)
> > {
> > return (__m128i) __builtin_ia32_vcvtph2bf8s256_mask ((__v16hf) __A,
> > (__v16qi)(__m128i)
> > @@ -848,7 +848,7 @@ _mm256_maskz_cvtph_hf8 (__mmask16 __U,
> __m256h
> > __A)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_cvtsph_hf8 (__m128h __A)
> > +_mm_cvts_ph_hf8 (__m128h __A)
> > {
> > return (__m128i) __builtin_ia32_vcvtph2hf8s128_mask ((__v8hf) __A,
> > (__v16qi)(__m128i)
> > @@ -858,7 +858,7 @@ _mm_cvtsph_hf8 (__m128h __A)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_mask_cvtsph_hf8 (__m128i __W, __mmask8 __U, __m128h __A)
> > +_mm_mask_cvts_ph_hf8 (__m128i __W, __mmask8 __U, __m128h __A)
> > {
> > return (__m128i) __builtin_ia32_vcvtph2hf8s128_mask ((__v8hf) __A,
> > (__v16qi)(__m128i) __W,
> > @@ -867,7 +867,7 @@ _mm_mask_cvtsph_hf8 (__m128i __W, __mmask8
> __U,
> > __m128h __A)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm_maskz_cvtsph_hf8 (__mmask8 __U, __m128h __A)
> > +_mm_maskz_cvts_ph_hf8 (__mmask8 __U, __m128h __A)
> > {
> > return (__m128i) __builtin_ia32_vcvtph2hf8s128_mask ((__v8hf) __A,
> > (__v16qi)(__m128i)
> > @@ -877,7 +877,7 @@ _mm_maskz_cvtsph_hf8 (__mmask8 __U,
> __m128h __A)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_cvtsph_hf8 (__m256h __A)
> > +_mm256_cvts_ph_hf8 (__m256h __A)
> > {
> > return (__m128i) __builtin_ia32_vcvtph2hf8s256_mask ((__v16hf) __A,
> > (__v16qi)(__m128i)
> > @@ -887,7 +887,7 @@ _mm256_cvtsph_hf8 (__m256h __A)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_mask_cvtsph_hf8 (__m128i __W, __mmask16 __U, __m256h
> __A)
> > +_mm256_mask_cvts_ph_hf8 (__m128i __W, __mmask16 __U, __m256h
> __A)
> > {
> > return (__m128i) __builtin_ia32_vcvtph2hf8s256_mask ((__v16hf) __A,
> > (__v16qi)(__m128i) __W,
> > @@ -896,7 +896,7 @@ _mm256_mask_cvtsph_hf8 (__m128i __W,
> __mmask16
> > __U, __m256h __A)
> >
> > extern __inline__ __m128i
> > __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> > -_mm256_maskz_cvtsph_hf8 (__mmask16 __U, __m256h __A)
> > +_mm256_maskz_cvts_ph_hf8 (__mmask16 __U, __m256h __A)
> > {
> > return (__m128i) __builtin_ia32_vcvtph2hf8s256_mask ((__v16hf) __A,
> > (__v16qi)(__m128i)
> > diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-convert-1.c
> > b/gcc/testsuite/gcc.target/i386/avx10_2-512-convert-1.c
> > index e9323626a69..ff103d077b1 100644
> > --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-convert-1.c
> > +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-convert-1.c
> > @@ -86,9 +86,9 @@ avx10_2_vcvtbiasph2bf8_test (void) void extern
> > avx10_2_vcvtbiasph2bf8s_test (void) {
> > - x256i = _mm512_cvtbiassph_bf8 (x512i, x512h);
> > - x256i = _mm512_mask_cvtbiassph_bf8 (x256i, m32, x512i, x512h);
> > - x256i = _mm512_maskz_cvtbiassph_bf8 (m32, x512i, x512h);
> > + x256i = _mm512_cvts_biasph_bf8 (x512i, x512h); x256i =
> > + _mm512_mask_cvts_biasph_bf8 (x256i, m32, x512i, x512h); x256i =
> > + _mm512_maskz_cvts_biasph_bf8 (m32, x512i, x512h);
> > }
> >
> > void extern
> > @@ -102,9 +102,9 @@ avx10_2_vcvtbiasph2hf8_test (void) void extern
> > avx10_2_vcvtbiasph2hf8s_test (void) {
> > - x256i = _mm512_cvtbiassph_hf8 (x512i, x512h);
> > - x256i = _mm512_mask_cvtbiassph_hf8 (x256i, m32, x512i, x512h);
> > - x256i = _mm512_maskz_cvtbiassph_hf8 (m32, x512i, x512h);
> > + x256i = _mm512_cvts_biasph_hf8 (x512i, x512h); x256i =
> > + _mm512_mask_cvts_biasph_hf8 (x256i, m32, x512i, x512h); x256i =
> > + _mm512_maskz_cvts_biasph_hf8 (m32, x512i, x512h);
> > }
> >
> > void extern
> > @@ -118,9 +118,9 @@ avx10_2_vcvt2ph2bf8_test (void) void extern
> > avx10_2_vcvt2ph2bf8s_test (void) {
> > - x512i = _mm512_cvts2ph_bf8 (x512h, x512h);
> > - x512i = _mm512_mask_cvts2ph_bf8 (x512i, m64, x512h, x512h);
> > - x512i = _mm512_maskz_cvts2ph_bf8 (m64, x512h, x512h);
> > + x512i = _mm512_cvts_2ph_bf8 (x512h, x512h); x512i =
> > + _mm512_mask_cvts_2ph_bf8 (x512i, m64, x512h, x512h); x512i =
> > + _mm512_maskz_cvts_2ph_bf8 (m64, x512h, x512h);
> > }
> >
> > void extern
> > @@ -134,9 +134,9 @@ avx10_2_vcvt2ph2hf8_test (void) void extern
> > avx10_2_vcvt2ph2hf8s_test (void) {
> > - x512i = _mm512_cvts2ph_hf8 (x512h, x512h);
> > - x512i = _mm512_mask_cvts2ph_hf8 (x512i, m64, x512h, x512h);
> > - x512i = _mm512_maskz_cvts2ph_hf8 (m64, x512h, x512h);
> > +  x512i = _mm512_cvts_2ph_hf8 (x512h, x512h);
> > +  x512i = _mm512_mask_cvts_2ph_hf8 (x512i, m64, x512h, x512h);
> > +  x512i = _mm512_maskz_cvts_2ph_hf8 (m64, x512h, x512h);
> > }
> >
> > void extern
> > @@ -158,9 +158,9 @@ avx10_2_vcvtph2bf8_test (void) void extern
> > avx10_2_vcvtph2bf8s_test (void) {
> > - x256i = _mm512_cvtsph_bf8 (x512h);
> > - x256i = _mm512_mask_cvtsph_bf8 (x256i, m32, x512h);
> > - x256i = _mm512_maskz_cvtsph_bf8 (m32, x512h);
> > +  x256i = _mm512_cvts_ph_bf8 (x512h);
> > +  x256i = _mm512_mask_cvts_ph_bf8 (x256i, m32, x512h);
> > +  x256i = _mm512_maskz_cvts_ph_bf8 (m32, x512h);
> > }
> >
> > void extern
> > @@ -174,9 +174,9 @@ avx10_2_vcvtph2hf8_test (void) void extern
> > avx10_2_vcvtph2hf8s_test (void) {
> > - x256i = _mm512_cvtsph_hf8 (x512h);
> > - x256i = _mm512_mask_cvtsph_hf8 (x256i, m32, x512h);
> > - x256i = _mm512_maskz_cvtsph_hf8 (m32, x512h);
> > +  x256i = _mm512_cvts_ph_hf8 (x512h);
> > +  x256i = _mm512_mask_cvts_ph_hf8 (x256i, m32, x512h);
> > +  x256i = _mm512_maskz_cvts_ph_hf8 (m32, x512h);
> > }
> >
> > void extern
> > diff --git
> > a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvt2ph2bf8s-2.c
> > b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvt2ph2bf8s-2.c
> > index aa8545cedf0..33d9c0c3ce4 100644
> > --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvt2ph2bf8s-2.c
> > +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvt2ph2bf8s-2.c
> > @@ -64,16 +64,16 @@ TEST (void)
> >
> > CALC(res_ref, src1.a, src2.a);
> >
> > - res1.x = INTRINSIC (_cvts2ph_bf8) (src1.x, src2.x);
> > + res1.x = INTRINSIC (_cvts_2ph_bf8) (src1.x, src2.x);
> > if (UNION_CHECK (AVX512F_LEN, i_b) (res1, res_ref))
> > abort ();
> >
> > - res2.x = INTRINSIC (_mask_cvts2ph_bf8) (res2.x, mask, src1.x,
> > src2.x);
> > +  res2.x = INTRINSIC (_mask_cvts_2ph_bf8) (res2.x, mask, src1.x, src2.x);
> > MASK_MERGE (i_b) (res_ref, mask, SIZE);
> > if (UNION_CHECK (AVX512F_LEN, i_b) (res2, res_ref))
> > abort ();
> >
> > - res3.x = INTRINSIC (_maskz_cvts2ph_bf8) (mask, src1.x, src2.x);
> > + res3.x = INTRINSIC (_maskz_cvts_2ph_bf8) (mask, src1.x, src2.x);
> > MASK_ZERO (i_b) (res_ref, mask, SIZE);
> > if (UNION_CHECK (AVX512F_LEN, i_b) (res3, res_ref))
> > abort ();
> > diff --git
> > a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvt2ph2hf8s-2.c
> > b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvt2ph2hf8s-2.c
> > index afed1d1e19c..b9fdfac4296 100644
> > --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvt2ph2hf8s-2.c
> > +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvt2ph2hf8s-2.c
> > @@ -64,16 +64,16 @@ TEST (void)
> >
> > CALC(res_ref, src1.a, src2.a);
> >
> > - res1.x = INTRINSIC (_cvts2ph_hf8) (src1.x, src2.x);
> > + res1.x = INTRINSIC (_cvts_2ph_hf8) (src1.x, src2.x);
> > if (UNION_CHECK (AVX512F_LEN, i_b) (res1, res_ref))
> > abort ();
> >
> > - res2.x = INTRINSIC (_mask_cvts2ph_hf8) (res2.x, mask, src1.x,
> > src2.x);
> > +  res2.x = INTRINSIC (_mask_cvts_2ph_hf8) (res2.x, mask, src1.x, src2.x);
> > MASK_MERGE (i_b) (res_ref, mask, SIZE);
> > if (UNION_CHECK (AVX512F_LEN, i_b) (res2, res_ref))
> > abort ();
> >
> > - res3.x = INTRINSIC (_maskz_cvts2ph_hf8) (mask, src1.x, src2.x);
> > + res3.x = INTRINSIC (_maskz_cvts_2ph_hf8) (mask, src1.x, src2.x);
> > MASK_ZERO (i_b) (res_ref, mask, SIZE);
> > if (UNION_CHECK (AVX512F_LEN, i_b) (res3, res_ref))
> > abort ();
> > diff --git
> > a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2bf8s-2.c
> > b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2bf8s-2.c
> > index 88ced07203c..93de7ea358f 100644
> > --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2bf8s-2.c
> > +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2bf8s-2.c
> > @@ -61,16 +61,16 @@ TEST (void)
> > CALC (res_ref, src1.a, src2.a);
> >
> >
> > - res1.x = INTRINSIC (_cvtbiassph_bf8) (src1.x, src2.x);
> > + res1.x = INTRINSIC (_cvts_biasph_bf8) (src1.x, src2.x);
> > if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res1, res_ref))
> > abort ();
> >
> > - res2.x = INTRINSIC (_mask_cvtbiassph_bf8) (res2.x, mask, src1.x,
> > src2.x);
> > +  res2.x = INTRINSIC (_mask_cvts_biasph_bf8) (res2.x, mask, src1.x, src2.x);
> > MASK_MERGE (i_b) (res_ref, mask, SIZE);
> > if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res2, res_ref))
> > abort ();
> >
> > - res3.x = INTRINSIC (_maskz_cvtbiassph_bf8) (mask, src1.x, src2.x);
> > + res3.x = INTRINSIC (_maskz_cvts_biasph_bf8) (mask, src1.x, src2.x);
> > MASK_ZERO (i_b) (res_ref, mask, SIZE);
> > if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res3, res_ref))
> > abort ();
> > diff --git
> > a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2hf8s-2.c
> > b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2hf8s-2.c
> > index 1a8b4d6e9dc..0333f08f930 100644
> > --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2hf8s-2.c
> > +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2hf8s-2.c
> > @@ -60,16 +60,16 @@ TEST (void)
> >
> > CALC (res_ref, src1.a, src2.a);
> >
> > - res1.x = INTRINSIC (_cvtbiassph_hf8) (src1.x, src2.x);
> > + res1.x = INTRINSIC (_cvts_biasph_hf8) (src1.x, src2.x);
> > if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res1, res_ref))
> > abort ();
> >
> > - res2.x = INTRINSIC (_mask_cvtbiassph_hf8) (res2.x, mask, src1.x,
> > src2.x);
> > +  res2.x = INTRINSIC (_mask_cvts_biasph_hf8) (res2.x, mask, src1.x, src2.x);
> > MASK_MERGE (i_b) (res_ref, mask, SIZE);
> > if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res2, res_ref))
> > abort ();
> >
> > - res3.x = INTRINSIC (_maskz_cvtbiassph_hf8) (mask, src1.x, src2.x);
> > + res3.x = INTRINSIC (_maskz_cvts_biasph_hf8) (mask, src1.x, src2.x);
> > MASK_ZERO (i_b) (res_ref, mask, SIZE);
> > if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res3, res_ref))
> > abort ();
> > diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2bf8s-2.c
> > b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2bf8s-2.c
> > index f4853ce29cd..c22e1aaa7d3 100644
> > --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2bf8s-2.c
> > +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2bf8s-2.c
> > @@ -60,16 +60,16 @@ TEST (void)
> >
> > CALC(res_ref, src.a);
> >
> > - res1.x = INTRINSIC (_cvtsph_bf8) (src.x);
> > + res1.x = INTRINSIC (_cvts_ph_bf8) (src.x);
> > if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res1, res_ref))
> > abort ();
> >
> > - res2.x = INTRINSIC (_mask_cvtsph_bf8) (res2.x, mask, src.x);
> > + res2.x = INTRINSIC (_mask_cvts_ph_bf8) (res2.x, mask, src.x);
> > MASK_MERGE (i_b) (res_ref, mask, SIZE);
> > if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res2, res_ref))
> > abort ();
> >
> > - res3.x = INTRINSIC (_maskz_cvtsph_bf8) (mask, src.x);
> > + res3.x = INTRINSIC (_maskz_cvts_ph_bf8) (mask, src.x);
> > MASK_ZERO (i_b) (res_ref, mask, SIZE);
> > if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res3, res_ref))
> > abort ();
> > diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2hf8s-2.c
> > b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2hf8s-2.c
> > index 43610bf9342..e6872e84535 100644
> > --- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2hf8s-2.c
> > +++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtph2hf8s-2.c
> > @@ -60,16 +60,16 @@ TEST (void)
> >
> > CALC(res_ref, src.a);
> >
> > - res1.x = INTRINSIC (_cvtsph_hf8) (src.x);
> > + res1.x = INTRINSIC (_cvts_ph_hf8) (src.x);
> > if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res1, res_ref))
> > abort ();
> >
> > - res2.x = INTRINSIC (_mask_cvtsph_hf8) (res2.x, mask, src.x);
> > + res2.x = INTRINSIC (_mask_cvts_ph_hf8) (res2.x, mask, src.x);
> > MASK_MERGE (i_b) (res_ref, mask, SIZE);
> > if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res2, res_ref))
> > abort ();
> >
> > - res3.x = INTRINSIC (_maskz_cvtsph_hf8) (mask, src.x);
> > + res3.x = INTRINSIC (_maskz_cvts_ph_hf8) (mask, src.x);
> > MASK_ZERO (i_b) (res_ref, mask, SIZE);
> > if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res3, res_ref))
> > abort ();
> > diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-convert-1.c
> > b/gcc/testsuite/gcc.target/i386/avx10_2-convert-1.c
> > index 62791d096af..3d5e921c538 100644
> > --- a/gcc/testsuite/gcc.target/i386/avx10_2-convert-1.c
> > +++ b/gcc/testsuite/gcc.target/i386/avx10_2-convert-1.c
> > @@ -138,13 +138,13 @@ avx10_2_vcvtbiasph2bf8_test (void) void extern
> > avx10_2_vcvtbiasph2bf8s_test (void) {
> > - x128i = _mm_cvtbiassph_bf8 (x128i, x128h);
> > - x128i = _mm_mask_cvtbiassph_bf8 (x128i, m8, x128i, x128h);
> > - x128i = _mm_maskz_cvtbiassph_bf8 (m8, x128i, x128h);
> > +  x128i = _mm_cvts_biasph_bf8 (x128i, x128h);
> > +  x128i = _mm_mask_cvts_biasph_bf8 (x128i, m8, x128i, x128h);
> > +  x128i = _mm_maskz_cvts_biasph_bf8 (m8, x128i, x128h);
> >
> > - x128i = _mm256_cvtbiassph_bf8 (x256i, x256h);
> > - x128i = _mm256_mask_cvtbiassph_bf8 (x128i, m16, x256i, x256h);
> > - x128i = _mm256_maskz_cvtbiassph_bf8 (m16, x256i, x256h);
> > +  x128i = _mm256_cvts_biasph_bf8 (x256i, x256h);
> > +  x128i = _mm256_mask_cvts_biasph_bf8 (x128i, m16, x256i, x256h);
> > +  x128i = _mm256_maskz_cvts_biasph_bf8 (m16, x256i, x256h);
> > }
> >
> > void extern
> > @@ -162,13 +162,13 @@ avx10_2_vcvtbiasph2hf8_test (void) void extern
> > avx10_2_vcvtbiasph2hf8s_test (void) {
> > - x128i = _mm_cvtbiassph_hf8 (x128i, x128h);
> > - x128i = _mm_mask_cvtbiassph_hf8 (x128i, m8, x128i, x128h);
> > - x128i = _mm_maskz_cvtbiassph_hf8 (m8, x128i, x128h);
> > +  x128i = _mm_cvts_biasph_hf8 (x128i, x128h);
> > +  x128i = _mm_mask_cvts_biasph_hf8 (x128i, m8, x128i, x128h);
> > +  x128i = _mm_maskz_cvts_biasph_hf8 (m8, x128i, x128h);
> >
> > - x128i = _mm256_cvtbiassph_hf8 (x256i, x256h);
> > - x128i = _mm256_mask_cvtbiassph_hf8 (x128i, m16, x256i, x256h);
> > - x128i = _mm256_maskz_cvtbiassph_hf8 (m16, x256i, x256h);
> > +  x128i = _mm256_cvts_biasph_hf8 (x256i, x256h);
> > +  x128i = _mm256_mask_cvts_biasph_hf8 (x128i, m16, x256i, x256h);
> > +  x128i = _mm256_maskz_cvts_biasph_hf8 (m16, x256i, x256h);
> > }
> >
> > void extern
> > @@ -185,12 +185,12 @@ avx10_2_vcvt2ph2bf8_test (void) void extern
> > avx10_2_vcvt2ph2bf8s_test (void) {
> > - x128i = _mm_cvts2ph_bf8 (x128h, x128h);
> > - x128i = _mm_mask_cvts2ph_bf8 (x128i, m16, x128h, x128h);
> > - x128i = _mm_maskz_cvts2ph_bf8 (m16, x128h, x128h);
> > - x256i = _mm256_cvts2ph_bf8 (x256h, x256h);
> > - x256i = _mm256_mask_cvts2ph_bf8 (x256i, m32, x256h, x256h);
> > - x256i = _mm256_maskz_cvts2ph_bf8 (m32, x256h, x256h);
> > +  x128i = _mm_cvts_2ph_bf8 (x128h, x128h);
> > +  x128i = _mm_mask_cvts_2ph_bf8 (x128i, m16, x128h, x128h);
> > +  x128i = _mm_maskz_cvts_2ph_bf8 (m16, x128h, x128h);
> > +  x256i = _mm256_cvts_2ph_bf8 (x256h, x256h);
> > +  x256i = _mm256_mask_cvts_2ph_bf8 (x256i, m32, x256h, x256h);
> > +  x256i = _mm256_maskz_cvts_2ph_bf8 (m32, x256h, x256h);
> > }
> >
> > void extern
> > @@ -207,12 +207,12 @@ avx10_2_vcvt2ph2hf8_test (void) void extern
> > avx10_2_vcvt2ph2hf8s_test (void) {
> > - x128i = _mm_cvts2ph_hf8 (x128h, x128h);
> > - x128i = _mm_mask_cvts2ph_hf8 (x128i, m16, x128h, x128h);
> > - x128i = _mm_maskz_cvts2ph_hf8 (m16, x128h, x128h);
> > - x256i = _mm256_cvts2ph_hf8 (x256h, x256h);
> > - x256i = _mm256_mask_cvts2ph_hf8 (x256i, m32, x256h, x256h);
> > - x256i = _mm256_maskz_cvts2ph_hf8 (m32, x256h, x256h);
> > +  x128i = _mm_cvts_2ph_hf8 (x128h, x128h);
> > +  x128i = _mm_mask_cvts_2ph_hf8 (x128i, m16, x128h, x128h);
> > +  x128i = _mm_maskz_cvts_2ph_hf8 (m16, x128h, x128h);
> > +  x256i = _mm256_cvts_2ph_hf8 (x256h, x256h);
> > +  x256i = _mm256_mask_cvts_2ph_hf8 (x256i, m32, x256h, x256h);
> > +  x256i = _mm256_maskz_cvts_2ph_hf8 (m32, x256h, x256h);
> > }
> >
> > void extern
> > @@ -242,13 +242,13 @@ avx10_2_vcvtph2bf8_test (void) void extern
> > avx10_2_vcvtph2bf8s_test (void) {
> > - x128i = _mm_cvtsph_bf8 (x128h);
> > - x128i = _mm_mask_cvtsph_bf8 (x128i, m8, x128h);
> > - x128i = _mm_maskz_cvtsph_bf8 (m8, x128h);
> > + x128i = _mm_cvts_ph_bf8 (x128h);
> > +  x128i = _mm_mask_cvts_ph_bf8 (x128i, m8, x128h);
> > +  x128i = _mm_maskz_cvts_ph_bf8 (m8, x128h);
> >
> > - x128i = _mm256_cvtsph_bf8 (x256h);
> > - x128i = _mm256_mask_cvtsph_bf8 (x128i, m16, x256h);
> > - x128i = _mm256_maskz_cvtsph_bf8 (m16, x256h);
> > +  x128i = _mm256_cvts_ph_bf8 (x256h);
> > +  x128i = _mm256_mask_cvts_ph_bf8 (x128i, m16, x256h);
> > +  x128i = _mm256_maskz_cvts_ph_bf8 (m16, x256h);
> > }
> >
> > void extern
> > @@ -266,13 +266,13 @@ avx10_2_vcvtph2hf8_test (void) void extern
> > avx10_2_vcvtph2hf8s_test (void) {
> > - x128i = _mm_cvtsph_hf8 (x128h);
> > - x128i = _mm_mask_cvtsph_hf8 (x128i, m8, x128h);
> > - x128i = _mm_maskz_cvtsph_hf8 (m8, x128h);
> > + x128i = _mm_cvts_ph_hf8 (x128h);
> > +  x128i = _mm_mask_cvts_ph_hf8 (x128i, m8, x128h);
> > +  x128i = _mm_maskz_cvts_ph_hf8 (m8, x128h);
> >
> > - x128i = _mm256_cvtsph_hf8 (x256h);
> > - x128i = _mm256_mask_cvtsph_hf8 (x128i, m16, x256h);
> > - x128i = _mm256_maskz_cvtsph_hf8 (m16, x256h);
> > +  x128i = _mm256_cvts_ph_hf8 (x256h);
> > +  x128i = _mm256_mask_cvts_ph_hf8 (x128i, m16, x256h);
> > +  x128i = _mm256_maskz_cvts_ph_hf8 (m16, x256h);
> > }
> >
> > void extern
> > --
> > 2.31.1