https://gcc.gnu.org/g:ce16fa99857c057ad95ad7cef8ce6f5ffbe9ef48
commit ce16fa99857c057ad95ad7cef8ce6f5ffbe9ef48
Author: Hu, Lin1 <lin1...@intel.com>
Date:   Thu Aug 15 09:38:26 2024 +0800

    AVX10.2 ymm rounding: Support v{max,min}p{s,d,h} intrins

    gcc/ChangeLog:

            * config/i386/avx10_2roundingintrin.h: New intrins.
            * config/i386/i386-builtin.def (BDESC): Add new builtins.

    gcc/testsuite/ChangeLog:

            * gcc.target/i386/avx-1.c: Add new builtin test.
            * gcc.target/i386/sse-13.c: Ditto.
            * gcc.target/i386/sse-14.c: Ditto.
            * gcc.target/i386/sse-22.c: Add new macro test.
            * gcc.target/i386/sse-23.c: Ditto.
            * gcc.target/i386/avx10_2-rounding-3.c: Add test.

Diff:
---
 gcc/config/i386/avx10_2roundingintrin.h            | 360 +++++++++++++++++++++
 gcc/config/i386/i386-builtin.def                   |   6 +
 gcc/testsuite/gcc.target/i386/avx-1.c              |   6 +
 gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c |  50 +++
 gcc/testsuite/gcc.target/i386/sse-13.c             |   6 +
 gcc/testsuite/gcc.target/i386/sse-14.c             |  18 ++
 gcc/testsuite/gcc.target/i386/sse-22.c             |  18 ++
 gcc/testsuite/gcc.target/i386/sse-23.c             |   6 +
 8 files changed, 470 insertions(+)

diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
index 07729a6cc04f..a5712f5230aa 100644
--- a/gcc/config/i386/avx10_2roundingintrin.h
+++ b/gcc/config/i386/avx10_2roundingintrin.h
@@ -3232,6 +3232,228 @@ _mm256_maskz_getmant_round_ps (__mmask8 __U, __m256 __A,
         _mm256_setzero_ps (),
         __U, __R);
 }
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_max_round_pd (__m256d __A, __m256d __B, const int __R)
+{
+  return (__m256d) __builtin_ia32_maxpd256_mask_round ((__v4df) __A,
+        (__v4df) __B,
+        (__v4df)
+        _mm256_undefined_pd (),
+        (__mmask8) -1,
+        __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
+        __m256d __B, const int __R)
+{
+  return (__m256d) __builtin_ia32_maxpd256_mask_round ((__v4df) __A,
+        (__v4df) __B,
+        (__v4df) __W,
+        (__mmask8) __U,
+        __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
+        const int __R)
+{
+  return (__m256d) __builtin_ia32_maxpd256_mask_round ((__v4df) __A,
+        (__v4df) __B,
+        (__v4df)
+        _mm256_setzero_pd (),
+        (__mmask8) __U,
+        __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_max_round_ph (__m256h __A, __m256h __B, const int __R)
+{
+  return (__m256h) __builtin_ia32_maxph256_mask_round ((__v16hf) __A,
+        (__v16hf) __B,
+        (__v16hf)
+        _mm256_undefined_ph (),
+        (__mmask16) -1,
+        __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
+        __m256h __B, const int __R)
+{
+  return (__m256h) __builtin_ia32_maxph256_mask_round ((__v16hf) __A,
+        (__v16hf) __B,
+        (__v16hf) __W,
+        (__mmask16) __U,
+        __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
+        const int __R)
+{
+  return (__m256h) __builtin_ia32_maxph256_mask_round ((__v16hf) __A,
+        (__v16hf) __B,
+        (__v16hf)
+        _mm256_setzero_ph (),
+        (__mmask16) __U,
+        __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_max_round_ps (__m256 __A, __m256 __B, const int __R)
+{
+  return (__m256) __builtin_ia32_maxps256_mask_round ((__v8sf) __A,
+        (__v8sf) __B,
+        (__v8sf)
+        _mm256_undefined_ps (),
+        (__mmask8) -1,
+        __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
+        const int __R)
+{
+  return (__m256) __builtin_ia32_maxps256_mask_round ((__v8sf) __A,
+        (__v8sf) __B,
+        (__v8sf) __W,
+        (__mmask8) __U,
+        __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
+        const int __R)
+{
+  return (__m256) __builtin_ia32_maxps256_mask_round ((__v8sf) __A,
+        (__v8sf) __B,
+        (__v8sf)
+        _mm256_setzero_ps (),
+        (__mmask8) __U,
+        __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_min_round_pd (__m256d __A, __m256d __B, const int __R)
+{
+  return (__m256d) __builtin_ia32_minpd256_mask_round ((__v4df) __A,
+        (__v4df) __B,
+        (__v4df)
+        _mm256_undefined_pd (),
+        (__mmask8) -1,
+        __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
+        __m256d __B, const int __R)
+{
+  return (__m256d) __builtin_ia32_minpd256_mask_round ((__v4df) __A,
+        (__v4df) __B,
+        (__v4df) __W,
+        (__mmask8) __U,
+        __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
+        const int __R)
+{
+  return (__m256d) __builtin_ia32_minpd256_mask_round ((__v4df) __A,
+        (__v4df) __B,
+        (__v4df)
+        _mm256_setzero_pd (),
+        (__mmask8) __U,
+        __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_min_round_ph (__m256h __A, __m256h __B, const int __R)
+{
+  return (__m256h) __builtin_ia32_minph256_mask_round ((__v16hf) __A,
+        (__v16hf) __B,
+        (__v16hf)
+        _mm256_undefined_ph (),
+        (__mmask16) -1,
+        __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
+        __m256h __B, const int __R)
+{
+  return (__m256h) __builtin_ia32_minph256_mask_round ((__v16hf) __A,
+        (__v16hf) __B,
+        (__v16hf) __W,
+        (__mmask16) __U,
+        __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
+        const int __R)
+{
+  return (__m256h) __builtin_ia32_minph256_mask_round ((__v16hf) __A,
+        (__v16hf) __B,
+        (__v16hf)
+        _mm256_setzero_ph (),
+        (__mmask16) __U,
+        __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_min_round_ps (__m256 __A, __m256 __B, const int __R)
+{
+  return (__m256) __builtin_ia32_minps256_mask_round ((__v8sf) __A,
+        (__v8sf) __B,
+        (__v8sf)
+        _mm256_undefined_ps (),
+        (__mmask8) -1,
+        __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
+        const int __R)
+{
+  return (__m256) __builtin_ia32_minps256_mask_round ((__v8sf) __A,
+        (__v8sf) __B,
+        (__v8sf) __W,
+        (__mmask8) __U,
+        __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
+        const int __R)
+{
+  return (__m256) __builtin_ia32_minps256_mask_round ((__v8sf) __A,
+        (__v8sf) __B,
+        (__v8sf)
+        _mm256_setzero_ps (),
+        (__mmask8) __U,
+        __R);
+}
 #else
 #define _mm256_add_round_pd(A, B, R) \
   ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
@@ -4850,6 +5072,144 @@ _mm256_maskz_getmant_round_ps (__mmask8 __U, __m256 __A,
         _mm256_setzero_ps (), \
         (__mmask8) (U), \
         (R)))
+
+#define _mm256_max_round_pd(A, B, R) \
+  ((__m256d) __builtin_ia32_maxpd256_mask_round ((__v4df) (A), \
+        (__v4df) (B), \
+        (__v4df) \
+        (_mm256_undefined_pd ()), \
+        (__mmask8) (-1), \
+        (R)))
+
+#define _mm256_mask_max_round_pd(W, U, A, B, R) \
+  ((__m256d) __builtin_ia32_maxpd256_mask_round ((__v4df) (A), \
+        (__v4df) (B), \
+        (__v4df) (W), \
+        (__mmask8) (U), \
+        (R)))
+
+#define _mm256_maskz_max_round_pd(U, A, B, R) \
+  ((__m256d) __builtin_ia32_maxpd256_mask_round ((__v4df) (A), \
+        (__v4df) (B), \
+        (__v4df) \
+        (_mm256_setzero_pd ()), \
+        (__mmask8) (U), \
+        (R)))
+
+#define _mm256_max_round_ph(A, B, R) \
+  ((__m256h) __builtin_ia32_maxph256_mask_round ((__v16hf) (A), \
+        (__v16hf) (B), \
+        (__v16hf) \
+        (_mm256_undefined_ph ()), \
+        (__mmask16) (-1), \
+        (R)))
+
+#define _mm256_mask_max_round_ph(W, U, A, B, R) \
+  ((__m256h) __builtin_ia32_maxph256_mask_round ((__v16hf) (A), \
+        (__v16hf) (B), \
+        (__v16hf) (W), \
+        (__mmask16) (U), \
+        (R)))
+
+#define _mm256_maskz_max_round_ph(U, A, B, R) \
+  ((__m256h) __builtin_ia32_maxph256_mask_round ((__v16hf) (A), \
+        (__v16hf) (B), \
+        (__v16hf) \
+        (_mm256_setzero_ph ()), \
+        (__mmask16) (U), \
+        (R)))
+
+#define _mm256_max_round_ps(A, B, R) \
+  ((__m256) __builtin_ia32_maxps256_mask_round ((__v8sf) (A), \
+        (__v8sf) (B), \
+        (__v8sf) \
+        (_mm256_undefined_ps ()), \
+        (__mmask8) (-1), \
+        (R)))
+
+#define _mm256_mask_max_round_ps(W, U, A, B, R) \
+  ((__m256) __builtin_ia32_maxps256_mask_round ((__v8sf) (A), \
+        (__v8sf) (B), \
+        (__v8sf) (W), \
+        (__mmask8) (U), \
+        (R)))
+
+#define _mm256_maskz_max_round_ps(U, A, B, R) \
+  ((__m256) __builtin_ia32_maxps256_mask_round ((__v8sf) (A), \
+        (__v8sf) (B), \
+        (__v8sf) \
+        (_mm256_setzero_ps ()), \
+        (__mmask8) (U), \
+        (R)))
+
+#define _mm256_min_round_pd(A, B, R) \
+  ((__m256d) __builtin_ia32_minpd256_mask_round ((__v4df) (A), \
+        (__v4df) (B), \
+        (__v4df) \
+        (_mm256_undefined_pd ()), \
+        (__mmask8) (-1), \
+        (R)))
+
+#define _mm256_mask_min_round_pd(W, U, A, B, R) \
+  ((__m256d) __builtin_ia32_minpd256_mask_round ((__v4df) (A), \
+        (__v4df) (B), \
+        (__v4df) (W), \
+        (__mmask8) (U), \
+        (R)))
+
+#define _mm256_maskz_min_round_pd(U, A, B, R) \
+  ((__m256d) __builtin_ia32_minpd256_mask_round ((__v4df) (A), \
+        (__v4df) (B), \
+        (__v4df) \
+        (_mm256_setzero_pd ()), \
+        (__mmask8) (U), \
+        (R)))
+
+#define _mm256_min_round_ph(A, B, R) \
+  ((__m256h) __builtin_ia32_minph256_mask_round ((__v16hf) (A), \
+        (__v16hf) (B), \
+        (__v16hf) \
+        (_mm256_undefined_ph ()), \
+        (__mmask16) (-1), \
+        (R)))
+
+#define _mm256_mask_min_round_ph(W, U, A, B, R) \
+  ((__m256h) __builtin_ia32_minph256_mask_round ((__v16hf) (A), \
+        (__v16hf) (B), \
+        (__v16hf) (W), \
+        (__mmask16) (U), \
+        (R)))
+
+#define _mm256_maskz_min_round_ph(U, A, B, R) \
+  ((__m256h) __builtin_ia32_minph256_mask_round ((__v16hf) (A), \
+        (__v16hf) (B), \
+        (__v16hf) \
+        (_mm256_setzero_ph ()), \
+        (__mmask16) (U), \
+        (R)))
+
+#define _mm256_min_round_ps(A, B, R) \
+  ((__m256) __builtin_ia32_minps256_mask_round ((__v8sf) (A), \
+        (__v8sf) (B), \
+        (__v8sf) \
+        (_mm256_undefined_ps ()), \
+        (__mmask8) (-1), \
+        (R)))
+
+#define _mm256_mask_min_round_ps(W, U, A, B, R) \
+  ((__m256) __builtin_ia32_minps256_mask_round ((__v8sf) (A), \
+        (__v8sf) (B), \
+        (__v8sf) (W), \
+        (__mmask8) (U), \
+        (R)))
+
+#define _mm256_maskz_min_round_ps(U, A, B, R) \
+  ((__m256) __builtin_ia32_minps256_mask_round ((__v8sf) (A), \
+        (__v8sf) (B), \
+        (__v8sf) \
+        (_mm256_setzero_ps ()), \
+        (__mmask8) (U), \
+        (R)))
 #endif
 
 #define _mm256_cmul_round_pch(A, B, R) _mm256_fcmul_round_pch ((A), (B), (R))
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index d53805314728..e5f837133ac8 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -3451,6 +3451,12 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_getexpv8sf_mask_round,
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_getmantv4df_mask_round, "__builtin_ia32_getmantpd256_mask_round", IX86_BUILTIN_VGETMANTPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT_V4DF_UQI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_getmantv16hf_mask_round, "__builtin_ia32_getmantph256_mask_round", IX86_BUILTIN_VGETMANTPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_INT_V16HF_UHI_INT)
 BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_getmantv8sf_mask_round, "__builtin_ia32_getmantps256_mask_round", IX86_BUILTIN_VGETMANTPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_smaxv4df3_mask_round, "__builtin_ia32_maxpd256_mask_round", IX86_BUILTIN_VMAXPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_smaxv16hf3_mask_round, "__builtin_ia32_maxph256_mask_round", IX86_BUILTIN_VMAXPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_smaxv8sf3_mask_round, "__builtin_ia32_maxps256_mask_round", IX86_BUILTIN_VMAXPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_sminv4df3_mask_round, "__builtin_ia32_minpd256_mask_round", IX86_BUILTIN_VMINPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_sminv16hf3_mask_round, "__builtin_ia32_minph256_mask_round", IX86_BUILTIN_VMINPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_sminv8sf3_mask_round, "__builtin_ia32_minps256_mask_round", IX86_BUILTIN_VMINPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
 
 BDESC_END (ROUND_ARGS, MULTI_ARG)
diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c
index 4ce009847dc2..9b1d808525f8 100644
--- a/gcc/testsuite/gcc.target/i386/avx-1.c
+++ b/gcc/testsuite/gcc.target/i386/avx-1.c
@@ -975,6 +975,12 @@
 #define __builtin_ia32_getmantpd256_mask_round(A, F, C, D, E) __builtin_ia32_getmantpd256_mask_round(A, 1, C, D, 8)
 #define __builtin_ia32_getmantph256_mask_round(A, F, C, D, E) __builtin_ia32_getmantph256_mask_round(A, 1, C, D, 8)
 #define __builtin_ia32_getmantps256_mask_round(A, F, C, D, E) __builtin_ia32_getmantps256_mask_round(A, 1, C, D, 8)
+#define __builtin_ia32_maxpd256_mask_round(A, B, C, D, E) __builtin_ia32_maxpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_maxph256_mask_round(A, B, C, D, E) __builtin_ia32_maxph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_maxps256_mask_round(A, B, C, D, E) __builtin_ia32_maxps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_minpd256_mask_round(A, B, C, D, E) __builtin_ia32_minpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_minph256_mask_round(A, B, C, D, E) __builtin_ia32_minph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_minps256_mask_round(A, B, C, D, E) __builtin_ia32_minps256_mask_round(A, B, C, D, 8)
 
 #include <wmmintrin.h>
 #include <immintrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c
index 3bed69ebeabd..aa3b3ab070ea 100644
--- a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-3.c
@@ -123,6 +123,24 @@
 /* { dg-final { scan-assembler-times "vgetmantps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
 /* { dg-final { scan-assembler-times "vgetmantps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
 /* { dg-final { scan-assembler-times "vgetmantps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmaxpd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmaxpd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmaxpd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmaxph\[ \\t\]+\{sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmaxph\[ \\t\]+\{sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vmaxph\[ \\t\]+\{sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmaxps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmaxps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vmaxps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vminpd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vminpd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vminpd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vminph\[ \\t\]+\{sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vminph\[ \\t\]+\{sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vminph\[ \\t\]+\{sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vminps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vminps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
+/* { dg-final { scan-assembler-times "vminps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
 
 #include <immintrin.h>
@@ -381,3 +399,35 @@ avx10_2_test_17 (void)
   x = _mm256_maskz_getmant_round_ps (m8, x, _MM_MANT_NORM_p75_1p5,
        _MM_MANT_SIGN_src, _MM_FROUND_NO_EXC);
 }
+
+void extern
+avx10_2_test_18 (void)
+{
+  xd = _mm256_max_round_pd (xd, xd, _MM_FROUND_NO_EXC);
+  xd = _mm256_mask_max_round_pd (xd, m8, xd, xd, _MM_FROUND_NO_EXC);
+  xd = _mm256_maskz_max_round_pd (m8, xd, xd, _MM_FROUND_NO_EXC);
+
+  xh = _mm256_max_round_ph (xh, xh, _MM_FROUND_NO_EXC);
+  xh = _mm256_mask_max_round_ph (xh, m16, xh, xh, _MM_FROUND_NO_EXC);
+  xh = _mm256_maskz_max_round_ph (m16, xh, xh, _MM_FROUND_NO_EXC);
+
+  x = _mm256_max_round_ps (x, x, _MM_FROUND_NO_EXC);
+  x = _mm256_mask_max_round_ps (x, m8, x, x, _MM_FROUND_NO_EXC);
+  x = _mm256_maskz_max_round_ps (m8, x, x, _MM_FROUND_NO_EXC);
+}
+
+void extern
+avx10_2_test_19 (void)
+{
+  xd = _mm256_min_round_pd (xd, xd, _MM_FROUND_NO_EXC);
+  xd = _mm256_mask_min_round_pd (xd, m8, xd, xd, _MM_FROUND_NO_EXC);
+  xd = _mm256_maskz_min_round_pd (m8, xd, xd, _MM_FROUND_NO_EXC);
+
+  xh = _mm256_min_round_ph (xh, xh, _MM_FROUND_NO_EXC);
+  xh = _mm256_mask_min_round_ph (xh, m16, xh, xh, _MM_FROUND_NO_EXC);
+  xh = _mm256_maskz_min_round_ph (m16, xh, xh, _MM_FROUND_NO_EXC);
+
+  x = _mm256_min_round_ps (x, x, _MM_FROUND_NO_EXC);
+  x = _mm256_mask_min_round_ps (x, m8, x, x, _MM_FROUND_NO_EXC);
+  x = _mm256_maskz_min_round_ps (m8, x, x, _MM_FROUND_NO_EXC);
+}
diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
index 04e0d59a09ca..a0276e3e1427 100644
--- a/gcc/testsuite/gcc.target/i386/sse-13.c
+++ b/gcc/testsuite/gcc.target/i386/sse-13.c
@@ -982,5 +982,11 @@
 #define __builtin_ia32_getmantpd256_mask_round(A, F, C, D, E) __builtin_ia32_getmantpd256_mask_round(A, 1, C, D, 8)
 #define __builtin_ia32_getmantph256_mask_round(A, F, C, D, E) __builtin_ia32_getmantph256_mask_round(A, 1, C, D, 8)
 #define __builtin_ia32_getmantps256_mask_round(A, F, C, D, E) __builtin_ia32_getmantps256_mask_round(A, 1, C, D, 8)
+#define __builtin_ia32_maxpd256_mask_round(A, B, C, D, E) __builtin_ia32_maxpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_maxph256_mask_round(A, B, C, D, E) __builtin_ia32_maxph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_maxps256_mask_round(A, B, C, D, E) __builtin_ia32_maxps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_minpd256_mask_round(A, B, C, D, E) __builtin_ia32_minpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_minph256_mask_round(A, B, C, D, E) __builtin_ia32_minph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_minps256_mask_round(A, B, C, D, E) __builtin_ia32_minps256_mask_round(A, B, C, D, 8)
 
 #include <x86intrin.h>
diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
index ceb3a65bf7fb..ed78fef8a05d 100644
--- a/gcc/testsuite/gcc.target/i386/sse-14.c
+++ b/gcc/testsuite/gcc.target/i386/sse-14.c
@@ -1129,6 +1129,12 @@ test_2 (_mm256_fcmul_round_pch, __m256h, __m256h, __m256h, 8)
 test_2 (_mm256_maskz_getexp_round_pd, __m256d, __mmask8, __m256d, 8)
 test_2 (_mm256_maskz_getexp_round_ph, __m256h, __mmask16, __m256h, 8)
 test_2 (_mm256_maskz_getexp_round_ps, __m256, __mmask8, __m256, 8)
+test_2 (_mm256_max_round_pd, __m256d, __m256d, __m256d, 8)
+test_2 (_mm256_max_round_ph, __m256h, __m256h, __m256h, 8)
+test_2 (_mm256_max_round_ps, __m256, __m256, __m256, 8)
+test_2 (_mm256_min_round_pd, __m256d, __m256d, __m256d, 8)
+test_2 (_mm256_min_round_ph, __m256h, __m256h, __m256h, 8)
+test_2 (_mm256_min_round_ps, __m256, __m256, __m256, 8)
 test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
 test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
 test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1213,6 +1219,12 @@ test_3 (_mm256_fnmsub_round_ps, __m256, __m256, __m256, __m256, 9)
 test_3 (_mm256_mask_getexp_round_pd, __m256d, __m256d, __mmask8, __m256d, 8)
 test_3 (_mm256_mask_getexp_round_ph, __m256h, __m256h, __mmask16, __m256h, 8)
 test_3 (_mm256_mask_getexp_round_ps, __m256, __m256, __mmask8, __m256, 8)
+test_3 (_mm256_maskz_max_round_pd, __m256d, __mmask8, __m256d, __m256d, 8)
+test_3 (_mm256_maskz_max_round_ph, __m256h, __mmask16, __m256h, __m256h, 8)
+test_3 (_mm256_maskz_max_round_ps, __m256, __mmask8, __m256, __m256, 8)
+test_3 (_mm256_maskz_min_round_pd, __m256d, __mmask8, __m256d, __m256d, 8)
+test_3 (_mm256_maskz_min_round_ph, __m256h, __mmask16, __m256h, __m256h, 8)
+test_3 (_mm256_maskz_min_round_ps, __m256, __mmask8, __m256, __m256, 8)
 test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
 test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
 test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
@@ -1289,6 +1301,12 @@ test_4 (_mm256_maskz_fnmsub_round_ph, __m256h,__mmask16, __m256h, __m256h, __m25
 test_4 (_mm256_mask_fnmsub_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
 test_4 (_mm256_mask3_fnmsub_round_ps, __m256, __m256, __m256, __m256, __mmask8, 9)
 test_4 (_mm256_maskz_fnmsub_round_ps, __m256,__mmask8, __m256, __m256, __m256, 9)
+test_4 (_mm256_mask_max_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 8)
+test_4 (_mm256_mask_max_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
+test_4 (_mm256_mask_max_round_ps, __m256, __m256, __mmask8, __m256, __m256, 8)
+test_4 (_mm256_mask_min_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 8)
+test_4 (_mm256_mask_min_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
+test_4 (_mm256_mask_min_round_ps, __m256, __m256, __mmask8, __m256, __m256, 8)
 test_4x (_mm256_maskz_fixupimm_round_pd, __m256d, __mmask8, __m256d, __m256d, __m256i, 3, 8)
 test_4x (_mm256_maskz_fixupimm_round_ps, __m256, __mmask8, __m256, __m256, __m256i, 3, 8)
 test_4x (_mm256_mask_fixupimm_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256i, 3, 8)
diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c
index 6457e750e3bc..2667aa8bf57a 100644
--- a/gcc/testsuite/gcc.target/i386/sse-22.c
+++ b/gcc/testsuite/gcc.target/i386/sse-22.c
@@ -1172,6 +1172,12 @@ test_2 (_mm256_fcmul_round_pch, __m256h, __m256h, __m256h, 8)
 test_2 (_mm256_maskz_getexp_round_pd, __m256d, __mmask8, __m256d, 8)
 test_2 (_mm256_maskz_getexp_round_ph, __m256h, __mmask16, __m256h, 8)
 test_2 (_mm256_maskz_getexp_round_ps, __m256, __mmask8, __m256, 8)
+test_2 (_mm256_max_round_pd, __m256d, __m256d, __m256d, 8)
+test_2 (_mm256_max_round_ph, __m256h, __m256h, __m256h, 8)
+test_2 (_mm256_max_round_ps, __m256, __m256, __m256, 8)
+test_2 (_mm256_min_round_pd, __m256d, __m256d, __m256d, 8)
+test_2 (_mm256_min_round_ph, __m256h, __m256h, __m256h, 8)
+test_2 (_mm256_min_round_ps, __m256, __m256, __m256, 8)
 test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
 test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
 test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1255,6 +1261,12 @@ test_3 (_mm256_fnmsub_round_ps, __m256, __m256, __m256, __m256, 9)
 test_3 (_mm256_mask_getexp_round_pd, __m256d, __m256d, __mmask8, __m256d, 8)
 test_3 (_mm256_mask_getexp_round_ph, __m256h, __m256h, __mmask16, __m256h, 8)
 test_3 (_mm256_mask_getexp_round_ps, __m256, __m256, __mmask8, __m256, 8)
+test_3 (_mm256_maskz_max_round_pd, __m256d, __mmask8, __m256d, __m256d, 8)
+test_3 (_mm256_maskz_max_round_ph, __m256h, __mmask16, __m256h, __m256h, 8)
+test_3 (_mm256_maskz_max_round_ps, __m256, __mmask8, __m256, __m256, 8)
+test_3 (_mm256_maskz_min_round_pd, __m256d, __mmask8, __m256d, __m256d, 8)
+test_3 (_mm256_maskz_min_round_ph, __m256h, __mmask16, __m256h, __m256h, 8)
+test_3 (_mm256_maskz_min_round_ps, __m256, __mmask8, __m256, __m256, 8)
 test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
 test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
 test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)
@@ -1331,6 +1343,12 @@ test_4 (_mm256_maskz_fnmsub_round_ph, __m256h,__mmask16, __m256h, __m256h, __m25
 test_4 (_mm256_mask_fnmsub_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9)
 test_4 (_mm256_mask3_fnmsub_round_ps, __m256, __m256, __m256, __m256, __mmask8, 9)
 test_4 (_mm256_maskz_fnmsub_round_ps, __m256,__mmask8, __m256, __m256, __m256, 9)
+test_4 (_mm256_mask_max_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 8)
+test_4 (_mm256_mask_max_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
+test_4 (_mm256_mask_max_round_ps, __m256, __m256, __mmask8, __m256, __m256, 8)
+test_4 (_mm256_mask_min_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 8)
+test_4 (_mm256_mask_min_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8)
+test_4 (_mm256_mask_min_round_ps, __m256, __m256, __mmask8, __m256, __m256, 8)
 test_4x (_mm256_maskz_fixupimm_round_pd, __m256d, __mmask8, __m256d, __m256d, __m256i, 3, 8)
 test_4x (_mm256_maskz_fixupimm_round_ps, __m256, __mmask8, __m256, __m256, __m256i, 3, 8)
 test_4x (_mm256_mask_fixupimm_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256i, 3, 8)
diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
index 95f55b2a843b..e27cb2d5bd2f 100644
--- a/gcc/testsuite/gcc.target/i386/sse-23.c
+++ b/gcc/testsuite/gcc.target/i386/sse-23.c
@@ -957,6 +957,12 @@
 #define __builtin_ia32_getmantpd256_mask_round(A, F, C, D, E) __builtin_ia32_getmantpd256_mask_round(A, 1, C, D, 8)
 #define __builtin_ia32_getmantph256_mask_round(A, F, C, D, E) __builtin_ia32_getmantph256_mask_round(A, 1, C, D, 8)
 #define __builtin_ia32_getmantps256_mask_round(A, F, C, D, E) __builtin_ia32_getmantps256_mask_round(A, 1, C, D, 8)
+#define __builtin_ia32_maxpd256_mask_round(A, B, C, D, E) __builtin_ia32_maxpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_maxph256_mask_round(A, B, C, D, E) __builtin_ia32_maxph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_maxps256_mask_round(A, B, C, D, E) __builtin_ia32_maxps256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_minpd256_mask_round(A, B, C, D, E) __builtin_ia32_minpd256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_minph256_mask_round(A, B, C, D, E) __builtin_ia32_minph256_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_minps256_mask_round(A, B, C, D, E) __builtin_ia32_minps256_mask_round(A, B, C, D, 8)
 
 #pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")