gcc/ChangeLog:

        * config/i386/avx10_2-512convertintrin.h: Omit "p" for packed
        in FP8 intrinsic names.
        * config/i386/avx10_2convertintrin.h: Ditto.

gcc/testsuite/ChangeLog:

        * gcc.target/i386/avx10_2-512-convert-1.c: Adjust intrin call.
        * gcc.target/i386/avx10_2-512-vcvtbiasph2bf8-2.c: Ditto.
        * gcc.target/i386/avx10_2-512-vcvtbiasph2bf8s-2.c: Ditto.
        * gcc.target/i386/avx10_2-512-vcvtbiasph2hf8-2.c: Ditto.
        * gcc.target/i386/avx10_2-512-vcvtbiasph2hf8s-2.c: Ditto.
        * gcc.target/i386/avx10_2-convert-1.c: Ditto.
---
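For reviewers, a minimal before/after sketch of the renamed intrinsics
(the helper names and the -mavx10.2-512 option below are illustrative;
only the intrinsic names come from this patch):

    /* Build with AVX10.2/512 enabled, e.g. gcc -mavx10.2-512 -O2.  */
    #include <immintrin.h>

    /* Bias-assisted FP16 -> BF8 conversion; the "p" (packed) is
       dropped from the destination-type suffix.  */
    __m256i
    to_bf8 (__m512i bias, __m512h src)
    {
      /* Was: _mm512_cvtbiasph_pbf8 (bias, src);  */
      return _mm512_cvtbiasph_bf8 (bias, src);
    }

    /* BF8 -> FP16 widening; the "p" is dropped from the source-type
       suffix.  */
    __m512h
    from_bf8 (__m256i packed_bf8)
    {
      /* Was: _mm512_cvtpbf8_ph (packed_bf8);  */
      return _mm512_cvtbf8_ph (packed_bf8);
    }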
 gcc/config/i386/avx10_2-512convertintrin.h    | 38 +++++-----
 gcc/config/i386/avx10_2convertintrin.h        | 76 +++++++++----------
 .../gcc.target/i386/avx10_2-512-convert-1.c   | 30 ++++----
 .../i386/avx10_2-512-vcvtbiasph2bf8-2.c       |  6 +-
 .../i386/avx10_2-512-vcvtbiasph2bf8s-2.c      |  6 +-
 .../i386/avx10_2-512-vcvtbiasph2hf8-2.c       |  6 +-
 .../i386/avx10_2-512-vcvtbiasph2hf8s-2.c      |  6 +-
 .../gcc.target/i386/avx10_2-convert-1.c       | 60 +++++++--------
 8 files changed, 114 insertions(+), 114 deletions(-)

diff --git a/gcc/config/i386/avx10_2-512convertintrin.h b/gcc/config/i386/avx10_2-512convertintrin.h
index 5c64b9f004b..1079e0a2bda 100644
--- a/gcc/config/i386/avx10_2-512convertintrin.h
+++ b/gcc/config/i386/avx10_2-512convertintrin.h
@@ -133,7 +133,7 @@ _mm512_maskz_cvtx_round2ps_ph (__mmask32 __U, __m512 __A,
 
 extern __inline__ __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtbiasph_pbf8 (__m512i __A, __m512h __B)
+_mm512_cvtbiasph_bf8 (__m512i __A, __m512h __B)
 {
   return (__m256i) __builtin_ia32_vcvtbiasph2bf8512_mask ((__v64qi) __A,
                                                          (__v32hf) __B,
@@ -144,8 +144,8 @@ _mm512_cvtbiasph_pbf8 (__m512i __A, __m512h __B)
 
 extern __inline__ __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtbiasph_pbf8 (__m256i __W, __mmask32 __U,
-                           __m512i __A, __m512h __B)
+_mm512_mask_cvtbiasph_bf8 (__m256i __W, __mmask32 __U,
+                          __m512i __A, __m512h __B)
 {
   return (__m256i) __builtin_ia32_vcvtbiasph2bf8512_mask ((__v64qi) __A,
                                                          (__v32hf) __B,
@@ -155,7 +155,7 @@ _mm512_mask_cvtbiasph_pbf8 (__m256i __W, __mmask32 __U,
 
 extern __inline__ __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtbiasph_pbf8 (__mmask32 __U, __m512i __A, __m512h __B)
+_mm512_maskz_cvtbiasph_bf8 (__mmask32 __U, __m512i __A, __m512h __B)
 {
   return (__m256i) __builtin_ia32_vcvtbiasph2bf8512_mask ((__v64qi) __A,
                                                          (__v32hf) __B,
@@ -166,7 +166,7 @@ _mm512_maskz_cvtbiasph_pbf8 (__mmask32 __U, __m512i __A, __m512h __B)
 
 extern __inline__ __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtbiassph_pbf8 (__m512i __A, __m512h __B)
+_mm512_cvtbiassph_bf8 (__m512i __A, __m512h __B)
 {
   return (__m256i) __builtin_ia32_vcvtbiasph2bf8s512_mask ((__v64qi) __A,
                                                           (__v32hf) __B,
@@ -177,8 +177,8 @@ _mm512_cvtbiassph_pbf8 (__m512i __A, __m512h __B)
 
 extern __inline__ __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtbiassph_pbf8 (__m256i __W, __mmask32 __U,
-                            __m512i __A, __m512h __B)
+_mm512_mask_cvtbiassph_bf8 (__m256i __W, __mmask32 __U,
+                           __m512i __A, __m512h __B)
 {
   return (__m256i) __builtin_ia32_vcvtbiasph2bf8s512_mask ((__v64qi) __A,
                                                           (__v32hf) __B,
@@ -188,7 +188,7 @@ _mm512_mask_cvtbiassph_pbf8 (__m256i __W, __mmask32 __U,
 
 extern __inline__ __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtbiassph_pbf8 (__mmask32 __U, __m512i __A, __m512h __B)
+_mm512_maskz_cvtbiassph_bf8 (__mmask32 __U, __m512i __A, __m512h __B)
 {
   return (__m256i) __builtin_ia32_vcvtbiasph2bf8s512_mask ((__v64qi) __A,
                                                           (__v32hf) __B,
@@ -199,7 +199,7 @@ _mm512_maskz_cvtbiassph_pbf8 (__mmask32 __U, __m512i __A, __m512h __B)
 
 extern __inline__ __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtbiasph_phf8 (__m512i __A, __m512h __B)
+_mm512_cvtbiasph_hf8 (__m512i __A, __m512h __B)
 {
   return (__m256i) __builtin_ia32_vcvtbiasph2hf8512_mask ((__v64qi) __A,
                                                          (__v32hf) __B,
@@ -210,8 +210,8 @@ _mm512_cvtbiasph_phf8 (__m512i __A, __m512h __B)
 
 extern __inline__ __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtbiasph_phf8 (__m256i __W, __mmask32 __U, __m512i __A,
-                               __m512h __B)
+_mm512_mask_cvtbiasph_hf8 (__m256i __W, __mmask32 __U, __m512i __A,
+                          __m512h __B)
 {
   return (__m256i) __builtin_ia32_vcvtbiasph2hf8512_mask ((__v64qi) __A,
                                                          (__v32hf) __B,
@@ -221,7 +221,7 @@ _mm512_mask_cvtbiasph_phf8 (__m256i __W, __mmask32 __U, __m512i __A,
 
 extern __inline__ __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtbiasph_phf8 (__mmask32 __U, __m512i __A, __m512h __B)
+_mm512_maskz_cvtbiasph_hf8 (__mmask32 __U, __m512i __A, __m512h __B)
 {
   return (__m256i) __builtin_ia32_vcvtbiasph2hf8512_mask ((__v64qi) __A,
                                                          (__v32hf) __B,
@@ -232,7 +232,7 @@ _mm512_maskz_cvtbiasph_phf8 (__mmask32 __U, __m512i __A, __m512h __B)
 
 extern __inline__ __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtbiassph_phf8 (__m512i __A, __m512h __B)
+_mm512_cvtbiassph_hf8 (__m512i __A, __m512h __B)
 {
   return (__m256i) __builtin_ia32_vcvtbiasph2hf8s512_mask ((__v64qi) __A,
                                                           (__v32hf) __B,
@@ -243,8 +243,8 @@ _mm512_cvtbiassph_phf8 (__m512i __A, __m512h __B)
 
 extern __inline__ __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtbiassph_phf8 (__m256i __W, __mmask32 __U,
-                            __m512i __A, __m512h __B)
+_mm512_mask_cvtbiassph_hf8 (__m256i __W, __mmask32 __U,
+                           __m512i __A, __m512h __B)
 {
   return (__m256i) __builtin_ia32_vcvtbiasph2hf8s512_mask ((__v64qi) __A,
                                                           (__v32hf) __B,
@@ -254,7 +254,7 @@ _mm512_mask_cvtbiassph_phf8 (__m256i __W, __mmask32 __U,
 
 extern __inline__ __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtbiassph_phf8 (__mmask32 __U, __m512i __A, __m512h __B)
+_mm512_maskz_cvtbiassph_hf8 (__mmask32 __U, __m512i __A, __m512h __B)
 {
   return (__m256i) __builtin_ia32_vcvtbiasph2hf8s512_mask ((__v64qi) __A,
                                                           (__v32hf) __B,
@@ -542,7 +542,7 @@ _mm512_maskz_cvtsph_hf8 (__mmask32 __U, __m512h __A)
 
 extern __inline __m512h
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_cvtpbf8_ph (__m256i __A)
+_mm512_cvtbf8_ph (__m256i __A)
 {
   return (__m512h) _mm512_castsi512_ph ((__m512i) _mm512_slli_epi16 (
         (__m512i) _mm512_cvtepi8_epi16 (__A), 8));
@@ -550,7 +550,7 @@ _mm512_cvtpbf8_ph (__m256i __A)
 
 extern __inline __m512h
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_mask_cvtpbf8_ph (__m512h __S, __mmask16 __U, __m256i __A)
+_mm512_mask_cvtbf8_ph (__m512h __S, __mmask16 __U, __m256i __A)
 {
   return (__m512h) _mm512_castsi512_ph ((__m512i) _mm512_mask_slli_epi16 (
         (__m512i) __S, __U, (__m512i) _mm512_cvtepi8_epi16 (__A), 8));
@@ -558,7 +558,7 @@ _mm512_mask_cvtpbf8_ph (__m512h __S, __mmask16 __U, __m256i __A)
 
 extern __inline __m512h
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm512_maskz_cvtpbf8_ph (__mmask16 __U, __m256i __A)
+_mm512_maskz_cvtbf8_ph (__mmask16 __U, __m256i __A)
 {
   return (__m512h) _mm512_castsi512_ph ((__m512i) _mm512_slli_epi16 (
         (__m512i) _mm512_maskz_cvtepi8_epi16 (__U, __A), 8));
diff --git a/gcc/config/i386/avx10_2convertintrin.h b/gcc/config/i386/avx10_2convertintrin.h
index 86355662799..3fc51b17435 100644
--- a/gcc/config/i386/avx10_2convertintrin.h
+++ b/gcc/config/i386/avx10_2convertintrin.h
@@ -166,7 +166,7 @@ _mm256_maskz_cvtx_round2ps_ph (__mmask16 __U, __m256 __A,
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtbiasph_pbf8 (__m128i __A, __m128h __B)
+_mm_cvtbiasph_bf8 (__m128i __A, __m128h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2bf8128 ((__v16qi) __A,
                                                     (__v8hf) __B);
@@ -174,8 +174,8 @@ _mm_cvtbiasph_pbf8 (__m128i __A, __m128h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtbiasph_pbf8 (__m128i __W, __mmask8 __U, __m128i __A,
-                            __m128h __B)
+_mm_mask_cvtbiasph_bf8 (__m128i __W, __mmask8 __U, __m128i __A,
+                       __m128h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2bf8128_mask ((__v16qi) __A,
                                                          (__v8hf) __B,
@@ -185,7 +185,7 @@ _mm_mask_cvtbiasph_pbf8 (__m128i __W, __mmask8 __U, __m128i __A,
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtbiasph_pbf8 (__mmask8 __U, __m128i __A, __m128h __B)
+_mm_maskz_cvtbiasph_bf8 (__mmask8 __U, __m128i __A, __m128h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2bf8128_mask ((__v16qi) __A,
                                                          (__v8hf) __B,
@@ -196,7 +196,7 @@ _mm_maskz_cvtbiasph_pbf8 (__mmask8 __U, __m128i __A, __m128h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtbiasph_pbf8 (__m256i __A, __m256h __B)
+_mm256_cvtbiasph_bf8 (__m256i __A, __m256h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2bf8256_mask ((__v32qi) __A,
                                                          (__v16hf) __B,
@@ -207,8 +207,8 @@ _mm256_cvtbiasph_pbf8 (__m256i __A, __m256h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtbiasph_pbf8 (__m128i __W, __mmask16 __U, __m256i __A,
-                               __m256h __B)
+_mm256_mask_cvtbiasph_bf8 (__m128i __W, __mmask16 __U, __m256i __A,
+                          __m256h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2bf8256_mask ((__v32qi) __A,
                                                          (__v16hf) __B,
@@ -218,7 +218,7 @@ _mm256_mask_cvtbiasph_pbf8 (__m128i __W, __mmask16 __U, __m256i __A,
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtbiasph_pbf8 (__mmask16 __U, __m256i __A, __m256h __B)
+_mm256_maskz_cvtbiasph_bf8 (__mmask16 __U, __m256i __A, __m256h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2bf8256_mask ((__v32qi) __A,
                                                          (__v16hf) __B,
@@ -229,7 +229,7 @@ _mm256_maskz_cvtbiasph_pbf8 (__mmask16 __U, __m256i __A, __m256h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtbiassph_pbf8 (__m128i __A, __m128h __B)
+_mm_cvtbiassph_bf8 (__m128i __A, __m128h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2bf8s128 ((__v16qi) __A,
                                                      (__v8hf) __B);
@@ -237,8 +237,8 @@ _mm_cvtbiassph_pbf8 (__m128i __A, __m128h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtbiassph_pbf8 (__m128i __W, __mmask8 __U,
-                         __m128i __A, __m128h __B)
+_mm_mask_cvtbiassph_bf8 (__m128i __W, __mmask8 __U,
+                        __m128i __A, __m128h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2bf8s128_mask ((__v16qi) __A,
                                                           (__v8hf) __B,
@@ -248,7 +248,7 @@ _mm_mask_cvtbiassph_pbf8 (__m128i __W, __mmask8 __U,
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtbiassph_pbf8 (__mmask8 __U, __m128i __A, __m128h __B)
+_mm_maskz_cvtbiassph_bf8 (__mmask8 __U, __m128i __A, __m128h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2bf8s128_mask ((__v16qi) __A,
                                                           (__v8hf) __B,
@@ -259,7 +259,7 @@ _mm_maskz_cvtbiassph_pbf8 (__mmask8 __U, __m128i __A, __m128h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtbiassph_pbf8 (__m256i __A, __m256h __B)
+_mm256_cvtbiassph_bf8 (__m256i __A, __m256h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2bf8s256_mask ((__v32qi) __A,
                                                           (__v16hf) __B,
@@ -270,8 +270,8 @@ _mm256_cvtbiassph_pbf8 (__m256i __A, __m256h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtbiassph_pbf8 (__m128i __W, __mmask16 __U,
-                            __m256i __A, __m256h __B)
+_mm256_mask_cvtbiassph_bf8 (__m128i __W, __mmask16 __U,
+                           __m256i __A, __m256h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2bf8s256_mask ((__v32qi) __A,
                                                           (__v16hf) __B,
@@ -281,7 +281,7 @@ _mm256_mask_cvtbiassph_pbf8 (__m128i __W, __mmask16 __U,
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtbiassph_pbf8 (__mmask16 __U, __m256i __A, __m256h __B)
+_mm256_maskz_cvtbiassph_bf8 (__mmask16 __U, __m256i __A, __m256h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2bf8s256_mask ((__v32qi) __A,
                                                           (__v16hf) __B,
@@ -292,7 +292,7 @@ _mm256_maskz_cvtbiassph_pbf8 (__mmask16 __U, __m256i __A, __m256h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtbiasph_phf8 (__m128i __A, __m128h __B)
+_mm_cvtbiasph_hf8 (__m128i __A, __m128h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2hf8128 ((__v16qi) __A,
                                                     (__v8hf) __B);
@@ -300,8 +300,8 @@ _mm_cvtbiasph_phf8 (__m128i __A, __m128h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtbiasph_phf8 (__m128i __W, __mmask8 __U, __m128i __A,
-                            __m128h __B)
+_mm_mask_cvtbiasph_hf8 (__m128i __W, __mmask8 __U, __m128i __A,
+                       __m128h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2hf8128_mask ((__v16qi) __A,
                                                          (__v8hf) __B,
@@ -311,7 +311,7 @@ _mm_mask_cvtbiasph_phf8 (__m128i __W, __mmask8 __U, __m128i __A,
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtbiasph_phf8 (__mmask8 __U, __m128i __A, __m128h __B)
+_mm_maskz_cvtbiasph_hf8 (__mmask8 __U, __m128i __A, __m128h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2hf8128_mask ((__v16qi) __A,
                                                          (__v8hf) __B,
@@ -322,7 +322,7 @@ _mm_maskz_cvtbiasph_phf8 (__mmask8 __U, __m128i __A, __m128h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtbiasph_phf8 (__m256i __A, __m256h __B)
+_mm256_cvtbiasph_hf8 (__m256i __A, __m256h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2hf8256_mask ((__v32qi) __A,
                                                          (__v16hf) __B,
@@ -333,8 +333,8 @@ _mm256_cvtbiasph_phf8 (__m256i __A, __m256h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtbiasph_phf8 (__m128i __W, __mmask16 __U,
-                           __m256i __A, __m256h __B)
+_mm256_mask_cvtbiasph_hf8 (__m128i __W, __mmask16 __U,
+                          __m256i __A, __m256h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2hf8256_mask ((__v32qi) __A,
                                                          (__v16hf) __B,
@@ -344,7 +344,7 @@ _mm256_mask_cvtbiasph_phf8 (__m128i __W, __mmask16 __U,
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtbiasph_phf8 (__mmask16 __U, __m256i __A, __m256h __B)
+_mm256_maskz_cvtbiasph_hf8 (__mmask16 __U, __m256i __A, __m256h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2hf8256_mask ((__v32qi) __A,
                                                          (__v16hf) __B,
@@ -355,7 +355,7 @@ _mm256_maskz_cvtbiasph_phf8 (__mmask16 __U, __m256i __A, __m256h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtbiassph_phf8 (__m128i __A, __m128h __B)
+_mm_cvtbiassph_hf8 (__m128i __A, __m128h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2hf8s128 ((__v16qi) __A,
                                                      (__v8hf) __B);
@@ -363,8 +363,8 @@ _mm_cvtbiassph_phf8 (__m128i __A, __m128h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtbiassph_phf8 (__m128i __W, __mmask8 __U,
-                         __m128i __A, __m128h __B)
+_mm_mask_cvtbiassph_hf8 (__m128i __W, __mmask8 __U,
+                        __m128i __A, __m128h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2hf8s128_mask ((__v16qi) __A,
                                                           (__v8hf) __B,
@@ -374,7 +374,7 @@ _mm_mask_cvtbiassph_phf8 (__m128i __W, __mmask8 __U,
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtbiassph_phf8 (__mmask8 __U, __m128i __A, __m128h __B)
+_mm_maskz_cvtbiassph_hf8 (__mmask8 __U, __m128i __A, __m128h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2hf8s128_mask ((__v16qi) __A,
                                                           (__v8hf) __B,
@@ -385,7 +385,7 @@ _mm_maskz_cvtbiassph_phf8 (__mmask8 __U, __m128i __A, __m128h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtbiassph_phf8 (__m256i __A, __m256h __B)
+_mm256_cvtbiassph_hf8 (__m256i __A, __m256h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2hf8s256_mask ((__v32qi) __A,
                                                           (__v16hf) __B,
@@ -396,8 +396,8 @@ _mm256_cvtbiassph_phf8 (__m256i __A, __m256h __B)
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtbiassph_phf8 (__m128i __W, __mmask16 __U,
-                            __m256i __A, __m256h __B)
+_mm256_mask_cvtbiassph_hf8 (__m128i __W, __mmask16 __U,
+                           __m256i __A, __m256h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2hf8s256_mask ((__v32qi) __A,
                                                           (__v16hf) __B,
@@ -407,7 +407,7 @@ _mm256_mask_cvtbiassph_phf8 (__m128i __W, __mmask16 __U,
 
 extern __inline__ __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtbiassph_phf8 (__mmask16 __U, __m256i __A, __m256h __B)
+_mm256_maskz_cvtbiassph_hf8 (__mmask16 __U, __m256i __A, __m256h __B)
 {
   return (__m128i) __builtin_ia32_vcvtbiasph2hf8s256_mask ((__v32qi) __A,
                                                           (__v16hf) __B,
@@ -972,7 +972,7 @@ _mm256_maskz_cvtsph_hf8 (__mmask16 __U, __m256h __A)
 
 extern __inline __m128h
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpbf8_ph (__m128i __A)
+_mm_cvtbf8_ph (__m128i __A)
 {
   return (__m128h) _mm_castsi128_ph ((__m128i) _mm_slli_epi16 (
         (__m128i) _mm_cvtepi8_epi16 (__A), 8));
@@ -980,7 +980,7 @@ _mm_cvtpbf8_ph (__m128i __A)
 
 extern __inline __m128h
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mask_cvtpbf8_ph (__m128h __S, __mmask8 __U, __m128i __A)
+_mm_mask_cvtbf8_ph (__m128h __S, __mmask8 __U, __m128i __A)
 {
   return (__m128h) _mm_castsi128_ph ((__m128i) _mm_mask_slli_epi16 (
         (__m128i) __S, __U, (__m128i) _mm_cvtepi8_epi16 (__A), 8));
@@ -988,7 +988,7 @@ _mm_mask_cvtpbf8_ph (__m128h __S, __mmask8 __U, __m128i __A)
 
 extern __inline __m128h
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskz_cvtpbf8_ph (__mmask8 __U, __m128i __A)
+_mm_maskz_cvtbf8_ph (__mmask8 __U, __m128i __A)
 {
   return (__m128h) _mm_castsi128_ph ((__m128i) _mm_slli_epi16 (
         (__m128i) _mm_maskz_cvtepi8_epi16 (__U, __A), 8));
@@ -996,7 +996,7 @@ _mm_maskz_cvtpbf8_ph (__mmask8 __U, __m128i __A)
 
 extern __inline __m256h
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cvtpbf8_ph (__m128i __A)
+_mm256_cvtbf8_ph (__m128i __A)
 {
   return (__m256h) _mm256_castsi256_ph ((__m256i) _mm256_slli_epi16 (
         (__m256i) _mm256_cvtepi8_epi16 (__A), 8));
@@ -1004,7 +1004,7 @@ _mm256_cvtpbf8_ph (__m128i __A)
 
 extern __inline __m256h
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cvtpbf8_ph (__m256h __S, __mmask8 __U, __m128i __A)
+_mm256_mask_cvtbf8_ph (__m256h __S, __mmask8 __U, __m128i __A)
 {
   return (__m256h) _mm256_castsi256_ph ((__m256i) _mm256_mask_slli_epi16 (
         (__m256i) __S, __U, (__m256i) _mm256_cvtepi8_epi16 (__A), 8));
@@ -1012,7 +1012,7 @@ _mm256_mask_cvtpbf8_ph (__m256h __S, __mmask8 __U, __m128i __A)
 
 extern __inline __m256h
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_cvtpbf8_ph (__mmask8 __U, __m128i __A)
+_mm256_maskz_cvtbf8_ph (__mmask8 __U, __m128i __A)
 {
   return (__m256h) _mm256_castsi256_ph ((__m256i) _mm256_slli_epi16 (
         (__m256i) _mm256_maskz_cvtepi8_epi16 (__U, __A), 8));
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-convert-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-convert-1.c
index 58db35d675a..bda74b5776b 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-512-convert-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-convert-1.c
@@ -78,33 +78,33 @@ avx10_2_512_test (void)
 void extern
 avx10_2_512_vcvtbiasph2bf8_test (void)
 {
-  x256i = _mm512_cvtbiasph_pbf8 (x512i, x512h);
-  x256i = _mm512_mask_cvtbiasph_pbf8 (x256i, m32, x512i, x512h);
-  x256i = _mm512_maskz_cvtbiasph_pbf8 (m32, x512i, x512h);
+  x256i = _mm512_cvtbiasph_bf8 (x512i, x512h);
+  x256i = _mm512_mask_cvtbiasph_bf8 (x256i, m32, x512i, x512h);
+  x256i = _mm512_maskz_cvtbiasph_bf8 (m32, x512i, x512h);
 }
 
 void extern
 avx10_2_512_vcvtbiasph2bf8s_test (void)
 {
-  x256i = _mm512_cvtbiassph_pbf8 (x512i, x512h);
-  x256i = _mm512_mask_cvtbiassph_pbf8 (x256i, m32, x512i, x512h);
-  x256i = _mm512_maskz_cvtbiassph_pbf8 (m32, x512i, x512h);
+  x256i = _mm512_cvtbiassph_bf8 (x512i, x512h);
+  x256i = _mm512_mask_cvtbiassph_bf8 (x256i, m32, x512i, x512h);
+  x256i = _mm512_maskz_cvtbiassph_bf8 (m32, x512i, x512h);
 }
 
 void extern
 avx10_2_512_vcvtbiasph2hf8_test (void)
 {
-  x256i = _mm512_cvtbiasph_phf8 (x512i, x512h);
-  x256i = _mm512_mask_cvtbiasph_phf8 (x256i, m32, x512i, x512h);
-  x256i = _mm512_maskz_cvtbiasph_phf8 (m32, x512i, x512h);
+  x256i = _mm512_cvtbiasph_hf8 (x512i, x512h);
+  x256i = _mm512_mask_cvtbiasph_hf8 (x256i, m32, x512i, x512h);
+  x256i = _mm512_maskz_cvtbiasph_hf8 (m32, x512i, x512h);
 }
 
 void extern
 avx10_2_512_vcvtbiasph2hf8s_test (void)
 {
-  x256i = _mm512_cvtbiassph_phf8 (x512i, x512h);
-  x256i = _mm512_mask_cvtbiassph_phf8 (x256i, m32, x512i, x512h);
-  x256i = _mm512_maskz_cvtbiassph_phf8 (m32, x512i, x512h);
+  x256i = _mm512_cvtbiassph_hf8 (x512i, x512h);
+  x256i = _mm512_mask_cvtbiassph_hf8 (x256i, m32, x512i, x512h);
+  x256i = _mm512_maskz_cvtbiassph_hf8 (m32, x512i, x512h);
 }
 
 void extern
@@ -182,7 +182,7 @@ avx10_2_512_vcvtph2hf8s_test (void)
 void extern
 avx10_2_512_cvtbf8_fp16_test (void)
 {
-  y = _mm512_cvtpbf8_ph (z1);
-  y = _mm512_mask_cvtpbf8_ph (z, m16, z1);
-  y = _mm512_maskz_cvtpbf8_ph (m16, z1);
+  y = _mm512_cvtbf8_ph (z1);
+  y = _mm512_mask_cvtbf8_ph (z, m16, z1);
+  y = _mm512_maskz_cvtbf8_ph (m16, z1);
 }
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2bf8-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2bf8-2.c
index 55b94309d16..c5edce61490 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2bf8-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2bf8-2.c
@@ -60,16 +60,16 @@ TEST (void)
 
   CALC (res_ref, src1.a, src2.a);
 
-  res1.x = INTRINSIC (_cvtbiasph_pbf8) (src1.x, src2.x);
+  res1.x = INTRINSIC (_cvtbiasph_bf8) (src1.x, src2.x);
   if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res1, res_ref))
     abort ();
 
-  res2.x = INTRINSIC (_mask_cvtbiasph_pbf8) (res2.x, mask, src1.x, src2.x);
+  res2.x = INTRINSIC (_mask_cvtbiasph_bf8) (res2.x, mask, src1.x, src2.x);
   MASK_MERGE (i_b) (res_ref, mask, SIZE);
   if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res2, res_ref))
     abort ();
 
-  res3.x = INTRINSIC (_maskz_cvtbiasph_pbf8) (mask, src1.x, src2.x);
+  res3.x = INTRINSIC (_maskz_cvtbiasph_bf8) (mask, src1.x, src2.x);
   MASK_ZERO (i_b) (res_ref, mask, SIZE);
   if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res3, res_ref))
     abort ();
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2bf8s-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2bf8s-2.c
index be5fa4961f7..c454cb57bff 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2bf8s-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2bf8s-2.c
@@ -61,16 +61,16 @@ TEST (void)
   CALC (res_ref, src1.a, src2.a);
 
 
-  res1.x = INTRINSIC (_cvtbiassph_pbf8) (src1.x, src2.x);
+  res1.x = INTRINSIC (_cvtbiassph_bf8) (src1.x, src2.x);
   if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res1, res_ref))
     abort ();
 
-  res2.x = INTRINSIC (_mask_cvtbiassph_pbf8) (res2.x, mask, src1.x, src2.x);
+  res2.x = INTRINSIC (_mask_cvtbiassph_bf8) (res2.x, mask, src1.x, src2.x);
   MASK_MERGE (i_b) (res_ref, mask, SIZE);
   if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res2, res_ref))
     abort ();
 
-  res3.x = INTRINSIC (_maskz_cvtbiassph_pbf8) (mask, src1.x, src2.x);
+  res3.x = INTRINSIC (_maskz_cvtbiassph_bf8) (mask, src1.x, src2.x);
   MASK_ZERO (i_b) (res_ref, mask, SIZE);
   if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res3, res_ref))
     abort ();
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2hf8-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2hf8-2.c
index ebae54332ae..84f19ae2d16 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2hf8-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2hf8-2.c
@@ -60,16 +60,16 @@ TEST (void)
 
   CALC (res_ref, src1.a, src2.a);
 
-  res1.x = INTRINSIC (_cvtbiasph_phf8) (src1.x, src2.x);
+  res1.x = INTRINSIC (_cvtbiasph_hf8) (src1.x, src2.x);
   if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res1, res_ref))
     abort ();
 
-  res2.x = INTRINSIC (_mask_cvtbiasph_phf8) (res2.x, mask, src1.x, src2.x);
+  res2.x = INTRINSIC (_mask_cvtbiasph_hf8) (res2.x, mask, src1.x, src2.x);
   MASK_MERGE (i_b) (res_ref, mask, SIZE);
   if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res2, res_ref))
     abort ();
 
-  res3.x = INTRINSIC (_maskz_cvtbiasph_phf8) (mask, src1.x, src2.x);
+  res3.x = INTRINSIC (_maskz_cvtbiasph_hf8) (mask, src1.x, src2.x);
   MASK_ZERO (i_b) (res_ref, mask, SIZE);
   if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res3, res_ref))
     abort ();
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2hf8s-2.c b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2hf8s-2.c
index fdd95191acb..2630c694fa5 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2hf8s-2.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-512-vcvtbiasph2hf8s-2.c
@@ -60,16 +60,16 @@ TEST (void)
 
   CALC (res_ref, src1.a, src2.a);
 
-  res1.x = INTRINSIC (_cvtbiassph_phf8) (src1.x, src2.x);
+  res1.x = INTRINSIC (_cvtbiassph_hf8) (src1.x, src2.x);
   if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res1, res_ref))
     abort ();
 
-  res2.x = INTRINSIC (_mask_cvtbiassph_phf8) (res2.x, mask, src1.x, src2.x);
+  res2.x = INTRINSIC (_mask_cvtbiassph_hf8) (res2.x, mask, src1.x, src2.x);
   MASK_MERGE (i_b) (res_ref, mask, SIZE);
   if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res2, res_ref))
     abort ();
 
-  res3.x = INTRINSIC (_maskz_cvtbiassph_phf8) (mask, src1.x, src2.x);
+  res3.x = INTRINSIC (_maskz_cvtbiassph_hf8) (mask, src1.x, src2.x);
   MASK_ZERO (i_b) (res_ref, mask, SIZE);
   if (UNION_CHECK (AVX512F_LEN_HALF, i_b) (res3, res_ref))
     abort ();
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-convert-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-convert-1.c
index 6bc6b183e34..0e8edd17f6c 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-convert-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-convert-1.c
@@ -132,49 +132,49 @@ avx10_2_test (void)
 void extern
 avx10_2_vcvtbiasph2bf8_test (void)
 {
-  x128i = _mm_cvtbiasph_pbf8 (x128i, x128h);
-  x128i = _mm_mask_cvtbiasph_pbf8 (x128i, m8, x128i, x128h);
-  x128i = _mm_maskz_cvtbiasph_pbf8 (m8, x128i, x128h);
+  x128i = _mm_cvtbiasph_bf8 (x128i, x128h);
+  x128i = _mm_mask_cvtbiasph_bf8 (x128i, m8, x128i, x128h);
+  x128i = _mm_maskz_cvtbiasph_bf8 (m8, x128i, x128h);
 
-  x128i = _mm256_cvtbiasph_pbf8 (x256i, x256h);
-  x128i = _mm256_mask_cvtbiasph_pbf8 (x128i, m16, x256i, x256h);
-  x128i = _mm256_maskz_cvtbiasph_pbf8 (m16, x256i, x256h);
+  x128i = _mm256_cvtbiasph_bf8 (x256i, x256h);
+  x128i = _mm256_mask_cvtbiasph_bf8 (x128i, m16, x256i, x256h);
+  x128i = _mm256_maskz_cvtbiasph_bf8 (m16, x256i, x256h);
 }
 
 void extern
 avx10_2_vcvtbiasph2bf8s_test (void)
 {
-  x128i = _mm_cvtbiassph_pbf8 (x128i, x128h);
-  x128i = _mm_mask_cvtbiassph_pbf8 (x128i, m8, x128i, x128h);
-  x128i = _mm_maskz_cvtbiassph_pbf8 (m8, x128i, x128h);
+  x128i = _mm_cvtbiassph_bf8 (x128i, x128h);
+  x128i = _mm_mask_cvtbiassph_bf8 (x128i, m8, x128i, x128h);
+  x128i = _mm_maskz_cvtbiassph_bf8 (m8, x128i, x128h);
 
-  x128i = _mm256_cvtbiassph_pbf8 (x256i, x256h);
-  x128i = _mm256_mask_cvtbiassph_pbf8 (x128i, m16, x256i, x256h);
-  x128i = _mm256_maskz_cvtbiassph_pbf8 (m16, x256i, x256h);
+  x128i = _mm256_cvtbiassph_bf8 (x256i, x256h);
+  x128i = _mm256_mask_cvtbiassph_bf8 (x128i, m16, x256i, x256h);
+  x128i = _mm256_maskz_cvtbiassph_bf8 (m16, x256i, x256h);
 }
 
 void extern
 avx10_2_vcvtbiasph2hf8_test (void)
 {
-  x128i = _mm_cvtbiasph_phf8 (x128i, x128h);
-  x128i = _mm_mask_cvtbiasph_phf8 (x128i, m8, x128i, x128h);
-  x128i = _mm_maskz_cvtbiasph_phf8 (m8, x128i, x128h);
+  x128i = _mm_cvtbiasph_hf8 (x128i, x128h);
+  x128i = _mm_mask_cvtbiasph_hf8 (x128i, m8, x128i, x128h);
+  x128i = _mm_maskz_cvtbiasph_hf8 (m8, x128i, x128h);
 
-  x128i = _mm256_cvtbiasph_phf8 (x256i, x256h);
-  x128i = _mm256_mask_cvtbiasph_phf8 (x128i, m16, x256i, x256h);
-  x128i = _mm256_maskz_cvtbiasph_phf8 (m16, x256i, x256h);
+  x128i = _mm256_cvtbiasph_hf8 (x256i, x256h);
+  x128i = _mm256_mask_cvtbiasph_hf8 (x128i, m16, x256i, x256h);
+  x128i = _mm256_maskz_cvtbiasph_hf8 (m16, x256i, x256h);
 }
 
 void extern
 avx10_2_vcvtbiasph2hf8s_test (void)
 {
-  x128i = _mm_cvtbiassph_phf8 (x128i, x128h);
-  x128i = _mm_mask_cvtbiassph_phf8 (x128i, m8, x128i, x128h);
-  x128i = _mm_maskz_cvtbiassph_phf8 (m8, x128i, x128h);
+  x128i = _mm_cvtbiassph_hf8 (x128i, x128h);
+  x128i = _mm_mask_cvtbiassph_hf8 (x128i, m8, x128i, x128h);
+  x128i = _mm_maskz_cvtbiassph_hf8 (m8, x128i, x128h);
 
-  x128i = _mm256_cvtbiassph_phf8 (x256i, x256h);
-  x128i = _mm256_mask_cvtbiassph_phf8 (x128i, m16, x256i, x256h);
-  x128i = _mm256_maskz_cvtbiassph_phf8 (m16, x256i, x256h);
+  x128i = _mm256_cvtbiassph_hf8 (x256i, x256h);
+  x128i = _mm256_mask_cvtbiassph_hf8 (x128i, m16, x256i, x256h);
+  x128i = _mm256_maskz_cvtbiassph_hf8 (m16, x256i, x256h);
 }
 
 void extern
@@ -284,11 +284,11 @@ avx10_2_vcvtph2hf8s_test (void)
 void extern
 avx10_2_cvtbf8_fp16_test (void)
 {
-  y = _mm_cvtpbf8_ph (z3);
-  y = _mm_mask_cvtpbf8_ph (z, m8, z3);
-  y = _mm_maskz_cvtpbf8_ph (m8, z3);
+  y = _mm_cvtbf8_ph (z3);
+  y = _mm_mask_cvtbf8_ph (z, m8, z3);
+  y = _mm_maskz_cvtbf8_ph (m8, z3);
 
-  y2 = _mm256_cvtpbf8_ph (z3);
-  y2 = _mm256_mask_cvtpbf8_ph (z2, m8, z3);
-  y2 = _mm256_maskz_cvtpbf8_ph (m8, z3);
+  y2 = _mm256_cvtbf8_ph (z3);
+  y2 = _mm256_mask_cvtbf8_ph (z2, m8, z3);
+  y2 = _mm256_maskz_cvtbf8_ph (m8, z3);
 }
-- 
2.31.1