github-actions[bot] wrote:

<!--LLVM CODE FORMAT COMMENT: {clang-format}-->


:warning: The C/C++ code formatter, clang-format, found formatting issues in your code. :warning:

<details>
<summary>
You can test this locally with the following command:
</summary>

``````````bash
git-clang-format --diff HEAD~1 HEAD --extensions h,c -- \
  clang/lib/Headers/fma4intrin.h clang/lib/Headers/fmaintrin.h \
  clang/test/CodeGen/X86/fma-builtins.c clang/test/CodeGen/X86/fma4-builtins.c
``````````

</details>

<details>
<summary>
View the diff from clang-format here.
</summary>

``````````diff
diff --git a/clang/lib/Headers/fma4intrin.h b/clang/lib/Headers/fma4intrin.h
index 05261f638..e0a0e4c96 100644
--- a/clang/lib/Headers/fma4intrin.h
+++ b/clang/lib/Headers/fma4intrin.h
@@ -28,16 +28,14 @@
 #define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
 #endif
 
-static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR 
-_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C) {
   return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
                                            (__v4sf)__C);
 }
 
-static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR 
-_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C) {
   return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
                                             (__v2df)__C);
 }
@@ -54,16 +52,14 @@ _mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
   return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, 
(__v2df)__C);
 }
 
-static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR 
-_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C) {
   return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
                                            -(__v4sf)__C);
 }
 
-static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR 
-_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C) {
   return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
                                             -(__v2df)__C);
 }
@@ -80,16 +76,14 @@ _mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
   return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, 
-(__v2df)__C);
 }
 
-static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR 
-_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C) {
   return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
                                            (__v4sf)__C);
 }
 
-static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR 
-_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C) {
   return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
                                             (__v2df)__C);
 }
@@ -106,16 +100,14 @@ _mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
   return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, 
(__v2df)__C);
 }
 
-static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR 
-_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C) {
   return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
                                            -(__v4sf)__C);
 }
 
-static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR 
-_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C) {
   return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
                                             -(__v2df)__C);
 }
@@ -156,58 +148,50 @@ _mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
   return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, 
-(__v2df)__C);
 }
 
-static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR 
-_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
-{
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C) {
   return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
                                            (__v8sf)__C);
 }
 
-static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR 
-_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
-{
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C) {
   return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
                                             (__v4df)__C);
 }
 
-static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR 
-_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
-{
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C) {
   return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
                                            -(__v8sf)__C);
 }
 
-static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR 
-_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
-{
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C) {
   return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
                                             -(__v4df)__C);
 }
 
-static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR 
-_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
-{
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C) {
   return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
                                            (__v8sf)__C);
 }
 
-static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR 
-_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
-{
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C) {
   return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
                                             (__v4df)__C);
 }
 
-static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR 
-_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
-{
+static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C) {
   return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
                                            -(__v8sf)__C);
 }
 
-static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR 
-_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
-{
+static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C) {
   return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
                                             -(__v4df)__C);
 }
diff --git a/clang/lib/Headers/fmaintrin.h b/clang/lib/Headers/fmaintrin.h
index d8ea48902..2d08c179b 100644
--- a/clang/lib/Headers/fmaintrin.h
+++ b/clang/lib/Headers/fmaintrin.h
@@ -41,8 +41,7 @@
 ///    A 128-bit vector of [4 x float] containing the addend.
 /// \returns A 128-bit vector of [4 x float] containing the result.
 static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
-{
+_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C) {
   return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
                                            (__v4sf)__C);
 }
@@ -62,8 +61,7 @@ _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
 ///    A 128-bit vector of [2 x double] containing the addend.
 /// \returns A 128-bit [2 x double] vector containing the result.
 static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
-{
+_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C) {
   return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
                                             (__v2df)__C);
 }
@@ -141,8 +139,7 @@ _mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
 ///    A 128-bit vector of [4 x float] containing the subtrahend.
 /// \returns A 128-bit vector of [4 x float] containing the result.
 static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
-{
+_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C) {
   return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
                                            -(__v4sf)__C);
 }
@@ -162,8 +159,7 @@ _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 ///    A 128-bit vector of [2 x double] containing the addend.
 /// \returns A 128-bit vector of [2 x double] containing the result.
 static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
-{
+_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C) {
   return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
                                             -(__v2df)__C);
 }
@@ -241,8 +237,7 @@ _mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
 ///    A 128-bit vector of [4 x float] containing the addend.
 /// \returns A 128-bit [4 x float] vector containing the result.
 static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
-{
+_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C) {
   return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
                                            (__v4sf)__C);
 }
@@ -262,8 +257,7 @@ _mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
 ///    A 128-bit vector of [2 x double] containing the addend.
 /// \returns A 128-bit vector of [2 x double] containing the result.
 static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
-{
+_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C) {
   return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
                                             (__v2df)__C);
 }
@@ -341,8 +335,7 @@ _mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
 ///    A 128-bit vector of [4 x float] containing the subtrahend.
 /// \returns A 128-bit vector of [4 x float] containing the result.
 static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
-{
+_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C) {
   return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
                                            -(__v4sf)__C);
 }
@@ -362,8 +355,7 @@ _mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 ///    A 128-bit vector of [2 x double] containing the subtrahend.
 /// \returns A 128-bit vector of [2 x double] containing the result.
 static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
-{
+_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C) {
   return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
                                             -(__v2df)__C);
 }
@@ -545,8 +537,7 @@ _mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
 ///    A 256-bit vector of [8 x float] containing the addend.
 /// \returns A 256-bit vector of [8 x float] containing the result.
 static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
-{
+_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C) {
   return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
                                            (__v8sf)__C);
 }
@@ -566,8 +557,7 @@ _mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
 ///    A 256-bit vector of [4 x double] containing the addend.
 /// \returns A 256-bit vector of [4 x double] containing the result.
 static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
-{
+_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C) {
   return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
                                             (__v4df)__C);
 }
@@ -587,8 +577,7 @@ _mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
 ///    A 256-bit vector of [8 x float] containing the subtrahend.
 /// \returns A 256-bit vector of [8 x float] containing the result.
 static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
-{
+_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C) {
   return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
                                            -(__v8sf)__C);
 }
@@ -608,8 +597,7 @@ _mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 ///    A 256-bit vector of [4 x double] containing the subtrahend.
 /// \returns A 256-bit vector of [4 x double] containing the result.
 static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
-{
+_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C) {
   return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
                                             -(__v4df)__C);
 }
@@ -629,8 +617,7 @@ _mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
 ///    A 256-bit vector of [8 x float] containing the addend.
 /// \returns A 256-bit vector of [8 x float] containing the result.
 static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
-{
+_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C) {
   return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
                                            (__v8sf)__C);
 }
@@ -650,8 +637,7 @@ _mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
 ///    A 256-bit vector of [4 x double] containing the addend.
 /// \returns A 256-bit vector of [4 x double] containing the result.
 static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
-{
+_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C) {
   return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
                                             (__v4df)__C);
 }
@@ -671,8 +657,7 @@ _mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
 ///    A 256-bit vector of [8 x float] containing the subtrahend.
 /// \returns A 256-bit vector of [8 x float] containing the result.
 static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
-{
+_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C) {
   return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
                                            -(__v8sf)__C);
 }
@@ -692,8 +677,7 @@ _mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 ///    A 256-bit vector of [4 x double] containing the subtrahend.
 /// \returns A 256-bit vector of [4 x double] containing the result.
 static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
-{
+_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C) {
   return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
                                             -(__v4df)__C);
 }

``````````

</details>


https://github.com/llvm/llvm-project/pull/154558
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to the author.