llvmbot wrote:


@llvm/pr-subscribers-clang

Author: Simon Pilgrim (RKSimon)

<details>
<summary>Changes</summary>

BLENDV intrinsics use the sign bit of the condition mask to select between the LHS (false) and RHS (true) operands.

First part of #157066 - BLENDVPS/BLENDVPD requires floatbits hacking, which I still need to do some prep work for.
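For context, here is a minimal scalar sketch (not part of the patch; the helper name and test values are illustrative) of the sign-bit select that the constant evaluator now models for `_mm_blendv_epi8` / `_mm256_blendv_epi8`:

```c
#include <stdint.h>
#include <stdio.h>

/* Scalar model of PBLENDVB: only the sign bit of each mask byte is
 * consulted; a set sign bit selects the RHS byte, otherwise the LHS byte. */
static void pblendvb_model(const int8_t *lhs, const int8_t *rhs,
                           const int8_t *mask, int8_t *out, int n) {
  for (int i = 0; i != n; ++i)
    out[i] = (mask[i] < 0) ? rhs[i] : lhs[i];
}

int main(void) {
  int8_t lhs[4]  = {0, 1, 2, 3};
  int8_t rhs[4]  = {-90, -91, -92, -93};
  int8_t mask[4] = {0, 0, 0, -1}; /* sign bit set only in the last byte */
  int8_t out[4];
  pblendvb_model(lhs, rhs, mask, out, 4);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 0 1 2 -93 */
  return 0;
}
```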

---
Full diff: https://github.com/llvm/llvm-project/pull/157100.diff


7 Files Affected:

- (modified) clang/include/clang/Basic/BuiltinsX86.td (+4-3) 
- (modified) clang/lib/AST/ByteCode/InterpBuiltin.cpp (+7) 
- (modified) clang/lib/AST/ExprConstant.cpp (+22) 
- (modified) clang/lib/Headers/avx2intrin.h (+2-3) 
- (modified) clang/lib/Headers/smmintrin.h (+2-3) 
- (modified) clang/test/CodeGen/X86/avx2-builtins.c (+1) 
- (modified) clang/test/CodeGen/X86/sse41-builtins.c (+1) 


``````````diff
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index 0d44e78f879b9..e52955d22c3f7 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -311,8 +311,7 @@ let Features = "ssse3", Attributes = [NoThrow, Const, RequiredVectorWidth<128>]
 }
 
 let Features = "sse4.1", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
-  def insertps128 : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Constant char)">;
-  def pblendvb128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>, _Vector<16, char>)">;
+  def insertps128 : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Constant char)">;
   def pblendw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>, _Constant int)">;
   def blendpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, double>, _Constant int)">;
   def blendps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Constant int)">;
@@ -336,6 +335,7 @@ let Features = "sse4.1", Attributes = [NoThrow, Const, RequiredVectorWidth<128>]
 }
 
 let Features = "sse4.1", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+  def pblendvb128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>, _Vector<16, char>)">;
   def pmuldq128 : X86Builtin<"_Vector<2, long long int>(_Vector<4, int>, _Vector<4, int>)">;
 }
 
@@ -573,7 +573,6 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
   def palignr256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>, _Constant int)">;
   def pavgb256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>)">;
   def pavgw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
-  def pblendvb256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>, _Vector<32, char>)">;
   def pblendw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>, _Constant int)">;
   def phaddw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
   def phaddd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>)">;
@@ -615,6 +614,8 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
 }
 
 let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
+  def pblendvb256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>, _Vector<32, char>)">;
+
   def pmuldq256 : X86Builtin<"_Vector<4, long long int>(_Vector<8, int>, _Vector<8, int>)">;
   def pmuludq256 : X86Builtin<"_Vector<4, long long int>(_Vector<8, int>, _Vector<8, int>)">;
 
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 713895b191c88..4e9fae4196775 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3412,6 +3412,13 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
   case Builtin::BI__builtin_elementwise_fma:
     return interp__builtin_elementwise_fma(S, OpPC, Call);
 
+  case clang::X86::BI__builtin_ia32_pblendvb128:
+  case clang::X86::BI__builtin_ia32_pblendvb256:
+    return interp__builtin_elementwise_triop(
+        S, OpPC, Call, [](const APSInt &F, const APSInt &T, const APSInt &C) {
+          return ((APInt)C).isNegative() ? T : F;
+        });
+
   case X86::BI__builtin_ia32_selectb_128:
   case X86::BI__builtin_ia32_selectb_256:
   case X86::BI__builtin_ia32_selectb_512:
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 6c6909e5b2370..100e944f9b48c 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -11995,6 +11995,28 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
 
     return Success(APValue(ResultElements.data(), ResultElements.size()), E);
   }
+  case X86::BI__builtin_ia32_pblendvb128:
+  case X86::BI__builtin_ia32_pblendvb256: {
+    // SSE blendv by mask signbit: "Result = C[] < 0 ? T[] : F[]".
+    APValue SourceF, SourceT, SourceC;
+    if (!EvaluateAsRValue(Info, E->getArg(0), SourceF) ||
+        !EvaluateAsRValue(Info, E->getArg(1), SourceT) ||
+        !EvaluateAsRValue(Info, E->getArg(2), SourceC))
+      return false;
+
+    unsigned SourceLen = SourceF.getVectorLength();
+    SmallVector<APValue, 32> ResultElements;
+    ResultElements.reserve(SourceLen);
+
+    for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
+      const APValue &F = SourceF.getVectorElt(EltNum);
+      const APValue &T = SourceT.getVectorElt(EltNum);
+      APInt C = SourceC.getVectorElt(EltNum).getInt();
+      ResultElements.push_back(C.isNegative() ? T : F);
+    }
+
+    return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+  }
   case X86::BI__builtin_ia32_selectb_128:
   case X86::BI__builtin_ia32_selectb_256:
   case X86::BI__builtin_ia32_selectb_512:
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index 5a32312be200e..eb01d904470e3 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -566,9 +566,8 @@ _mm256_avg_epu16(__m256i __a, __m256i __b)
 ///    is 0, the byte is copied from \a __V1; otherwise, it is copied from
 ///    \a __V2.
 /// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M) {
   return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
                                               (__v32qi)__M);
 }
diff --git a/clang/lib/Headers/smmintrin.h b/clang/lib/Headers/smmintrin.h
index 06b3da8b48dfe..8ee6f38eff054 100644
--- a/clang/lib/Headers/smmintrin.h
+++ b/clang/lib/Headers/smmintrin.h
@@ -499,9 +499,8 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_blendv_ps(__m128 __V1,
 ///    position in the result. When a mask bit is 1, the corresponding 8-bit
 ///    element in operand \a __V2 is copied to the same position in the result.
 /// \returns A 128-bit vector of [16 x i8] containing the copied values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_blendv_epi8(__m128i __V1,
-                                                             __m128i __V2,
-                                                             __m128i __M) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_blendv_epi8(__m128i __V1, __m128i __V2, __m128i __M) {
   return (__m128i)__builtin_ia32_pblendvb128((__v16qi)__V1, (__v16qi)__V2,
                                              (__v16qi)__M);
 }
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c
index 18ce88c8fb4d0..724a5f693f9fe 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -164,6 +164,7 @@ __m256i test_mm256_blendv_epi8(__m256i a, __m256i b, __m256i m) {
   // CHECK: call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}})
   return _mm256_blendv_epi8(a, b, m);
 }
+TEST_CONSTEXPR(match_v32qi(_mm256_blendv_epi8((__m256i)(__v32qs){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31},(__m256i)(__v32qs){-90,-91,-92,-93,-94,-95,-96,-97,-98,-99,-100,-101,-12,-13,-104,-105,-106,-107,-108,-109,-100,-101,-12,-13,-104,-105,-106,-107,-108,-109,-120,-121},(__m256i)(__v32qs){0,0,0,-1,0,-1,-1,0,0,0,-1,-1,0,-1,0,0,0,0,0,0,0,0,0,-1,-1,-1,0,0,0,0,0,-1}), 0, 1, 2, -93, 4, -95, -96, 7, 8, 9, -100, -101, 12, -13, 14, 15, 16, 17, 18, 19, 20, 21, 22, -13, -104, -105, 26, 27, 28, 29, 30, -121));
 
 __m128i test_mm_broadcastb_epi8(__m128i a) {
   // CHECK-LABEL: test_mm_broadcastb_epi8
diff --git a/clang/test/CodeGen/X86/sse41-builtins.c b/clang/test/CodeGen/X86/sse41-builtins.c
index 06c992e20baeb..3abfee0409f16 100644
--- a/clang/test/CodeGen/X86/sse41-builtins.c
+++ b/clang/test/CodeGen/X86/sse41-builtins.c
@@ -45,6 +45,7 @@ __m128i test_mm_blendv_epi8(__m128i V1, __m128i V2, __m128i V3) {
   // CHECK: call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}})
   return _mm_blendv_epi8(V1, V2, V3);
 }
+TEST_CONSTEXPR(match_v16qi(_mm_blendv_epi8((__m128i)(__v16qs){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15},(__m128i)(__v16qs){-99,-98,97,-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84},(__m128i)(__v16qs){-1,-1,0,-1,0,0,0,0,0,-1,-1,-1,0,0,-1,0}), -99, -98, 2, -96, 4, 5, 6, 7, 8, -90, -89, -88, 12, 13, -85, 15));
 
 __m128d test_mm_blendv_pd(__m128d V1, __m128d V2, __m128d V3) {
   // CHECK-LABEL: test_mm_blendv_pd

``````````

</details>


https://github.com/llvm/llvm-project/pull/157100