OK for master and backports?
Tested on x86_64-pc-linux-gnu
----- >8 -----
The call to the base implementation sometimes didn't find a matching
signature because the _Abi parameter of _SimdImpl* was "wrong" after
conversion. It has to call into <new ABI tag>::_SimdImpl instead of the
current ABI tag's _SimdImpl. This also reduces the number of possible
template instantiations.
Signed-off-by: Matthias Kretz <m.kretz@gsi.de>
libstdc++-v3/ChangeLog:
PR libstdc++/110054
* include/experimental/bits/simd_builtin.h (_S_masked_store):
Call into the deduced ABI's _SimdImpl after conversion.
* include/experimental/bits/simd_x86.h (_S_masked_store_nocvt):
Don't use _mm_maskmoveu_si128. Use the generic fall-back
implementation instead. Also fix masked stores without SSE2,
which previously did nothing.
---
.../include/experimental/bits/simd_builtin.h | 6 +--
.../include/experimental/bits/simd_x86.h | 38 ++-----------------
2 files changed, 7 insertions(+), 37 deletions(-)
--
──────────────────────────────────────────────────────────────────────────
Dr. Matthias Kretz https://mattkretz.github.io
GSI Helmholtz Centre for Heavy Ion Research https://gsi.de
stdₓ::simd
──────────────────────────────────────────────────────────────────────────
diff --git a/libstdc++-v3/include/experimental/bits/simd_builtin.h b/libstdc++-v3/include/experimental/bits/simd_builtin.h
index 8337fa2d9a6..64ef6efaf8c 100644
--- a/libstdc++-v3/include/experimental/bits/simd_builtin.h
+++ b/libstdc++-v3/include/experimental/bits/simd_builtin.h
@@ -1628,7 +1628,7 @@ _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem, _MaskMember<_Tp> _
if constexpr (_UW_size == _TV_size) // one convert+store
{
const _UW __converted = __convert<_UW>(__v);
- _SuperImpl::_S_masked_store_nocvt(
+ _UAbi::_SimdImpl::_S_masked_store_nocvt(
__converted, __mem,
_UAbi::_MaskImpl::template _S_convert<
__int_for_sizeof_t<_Up>>(__k));
@@ -1643,7 +1643,7 @@ _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem, _MaskMember<_Tp> _
const array<_UV, _NAllStores> __converted
= __convert_all<_UV, _NAllStores>(__v);
__execute_n_times<_NFullStores>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
- _SuperImpl::_S_masked_store_nocvt(
+ _UAbi::_SimdImpl::_S_masked_store_nocvt(
_UW(__converted[__i]), __mem + __i * _UW_size,
_UAbi::_MaskImpl::template _S_convert<
__int_for_sizeof_t<_Up>>(
@@ -1651,7 +1651,7 @@ _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem, _MaskMember<_Tp> _
});
if constexpr (_NAllStores
> _NFullStores) // one partial at the end
- _SuperImpl::_S_masked_store_nocvt(
+ _UAbi::_SimdImpl::_S_masked_store_nocvt(
_UW(__converted[_NFullStores]),
__mem + _NFullStores * _UW_size,
_UAbi::_MaskImpl::template _S_convert<
diff --git a/libstdc++-v3/include/experimental/bits/simd_x86.h b/libstdc++-v3/include/experimental/bits/simd_x86.h
index 77d2f84ab71..2e301e45677 100644
--- a/libstdc++-v3/include/experimental/bits/simd_x86.h
+++ b/libstdc++-v3/include/experimental/bits/simd_x86.h
@@ -1106,31 +1106,6 @@ _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem, _SimdWrapper<bool,
else
_mm512_mask_storeu_pd(__mem, __k, __vi);
}
-#if 0 // with KNL either sizeof(_Tp) >= 4 or sizeof(_vi) <= 32
- // with Skylake-AVX512, __have_avx512bw is true
- else if constexpr (__have_sse2)
- {
- using _M = __vector_type_t<_Tp, _Np>;
- using _MVT = _VectorTraits<_M>;
- _mm_maskmoveu_si128(__auto_bitcast(__extract<0, 4>(__v._M_data)),
- __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(__k._M_data)),
- reinterpret_cast<char*>(__mem));
- _mm_maskmoveu_si128(__auto_bitcast(__extract<1, 4>(__v._M_data)),
- __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(
- __k._M_data >> 1 * _MVT::_S_full_size)),
- reinterpret_cast<char*>(__mem) + 1 * 16);
- _mm_maskmoveu_si128(__auto_bitcast(__extract<2, 4>(__v._M_data)),
- __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(
- __k._M_data >> 2 * _MVT::_S_full_size)),
- reinterpret_cast<char*>(__mem) + 2 * 16);
- if constexpr (_Np > 48 / sizeof(_Tp))
- _mm_maskmoveu_si128(
- __auto_bitcast(__extract<3, 4>(__v._M_data)),
- __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(
- __k._M_data >> 3 * _MVT::_S_full_size)),
- reinterpret_cast<char*>(__mem) + 3 * 16);
- }
-#endif
else
__assert_unreachable<_Tp>();
}
@@ -1233,8 +1208,8 @@ _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem,
else if constexpr (__have_avx && sizeof(_Tp) == 8)
_mm_maskstore_pd(reinterpret_cast<double*>(__mem), __ki,
__vector_bitcast<double>(__vi));
- else if constexpr (__have_sse2)
- _mm_maskmoveu_si128(__vi, __ki, reinterpret_cast<char*>(__mem));
+ else
+ _Base::_S_masked_store_nocvt(__v, __mem, __k);
}
else if constexpr (sizeof(__v) == 32)
{
@@ -1259,13 +1234,8 @@ _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem,
else if constexpr (__have_avx && sizeof(_Tp) == 8)
_mm256_maskstore_pd(reinterpret_cast<double*>(__mem), __ki,
__vector_bitcast<double>(__v));
- else if constexpr (__have_sse2)
- {
- _mm_maskmoveu_si128(__lo128(__vi), __lo128(__ki),
- reinterpret_cast<char*>(__mem));
- _mm_maskmoveu_si128(__hi128(__vi), __hi128(__ki),
- reinterpret_cast<char*>(__mem) + 16);
- }
+ else
+ _Base::_S_masked_store_nocvt(__v, __mem, __k);
}
else
__assert_unreachable<_Tp>();