Hi,

The intrinsic compatibility headers make use of some deprecated functions for vector shifts that are not available in some compilers. For compatibility, this patch replaces them with intrinsics that are guaranteed to be portable.
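To illustrate the pattern (this sketch is not part of the patch, and the helper name is made up for illustration): the deprecated element-width-specific builtins such as vec_vslh are spelled portably as the overloaded altivec.h intrinsics vec_sl/vec_sra/vec_sr, which resolve the element width from the operand types. For example:

  #include <altivec.h>

  /* Hypothetical helper showing the portable spelling of the halfword
     left shift used inside _mm_slli_epi16: the overloaded vec_sl picks
     the halfword form from the operand types, so the deprecated
     vec_vslh spelling is not needed.  Assumes b < 16.  */
  static inline __vector signed short
  shift_left_halfwords (__vector signed short a, unsigned short b)
  {
    __vector unsigned short lshift = vec_splats (b);
    return vec_sl (a, lshift);
  }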
Bootstrapped and tested on powerpc64le-linux-gnu with no regressions. Is this okay for trunk?

Thanks,
Bill

2018-10-24  Bill Schmidt  <wschm...@linux.ibm.com>
	    Jinsong Ji  <j...@us.ibm.com>

	* config/rs6000/emmintrin.h (_mm_slli_epi16): Replace deprecated
	function with vec_sl.
	(_mm_slli_epi32): Likewise.
	(_mm_slli_epi64): Likewise.
	(_mm_srai_epi16): Replace deprecated function with vec_sra.
	(_mm_srai_epi32): Likewise.
	(_mm_srli_epi16): Replace deprecated function with vec_sr.
	(_mm_srli_epi32): Likewise.
	(_mm_srli_epi64): Likewise.
	(_mm_sll_epi16): Replace deprecated function with vec_sl.
	(_mm_sll_epi32): Likewise.
	(_mm_sll_epi64): Likewise.
	(_mm_sra_epi16): Replace deprecated function with vec_sra.
	(_mm_sra_epi32): Likewise.
	(_mm_srl_epi16): Replace deprecated function with vec_sr.
	(_mm_srl_epi32): Likewise.
	(_mm_srl_epi64): Likewise.

Index: gcc/config/rs6000/emmintrin.h
===================================================================
--- gcc/config/rs6000/emmintrin.h	(revision 265464)
+++ gcc/config/rs6000/emmintrin.h	(working copy)
@@ -1504,7 +1504,7 @@ _mm_slli_epi16 (__m128i __A, int __B)
       else
 	lshift = vec_splats ((unsigned short) __B);
 
-      result = vec_vslh ((__v8hi) __A, lshift);
+      result = vec_sl ((__v8hi) __A, lshift);
     }
 
   return (__m128i) result;
@@ -1523,7 +1523,7 @@ _mm_slli_epi32 (__m128i __A, int __B)
       else
 	lshift = vec_splats ((unsigned int) __B);
 
-      result = vec_vslw ((__v4si) __A, lshift);
+      result = vec_sl ((__v4si) __A, lshift);
     }
 
   return (__m128i) result;
@@ -1543,7 +1543,7 @@ _mm_slli_epi64 (__m128i __A, int __B)
       else
 	lshift = (__v2du) vec_splats ((unsigned int) __B);
 
-      result = vec_vsld ((__v2di) __A, lshift);
+      result = vec_sl ((__v2di) __A, lshift);
     }
 
   return (__m128i) result;
@@ -1563,7 +1563,7 @@ _mm_srai_epi16 (__m128i __A, int __B)
       else
 	rshift = vec_splats ((unsigned short) __B);
     }
-  result = vec_vsrah ((__v8hi) __A, rshift);
+  result = vec_sra ((__v8hi) __A, rshift);
 
   return (__m128i) result;
 }
@@ -1586,7 +1586,7 @@ _mm_srai_epi32 (__m128i __A, int __B)
       else
 	rshift = vec_splats ((unsigned int) __B);
     }
-  result = vec_vsraw ((__v4si) __A, rshift);
+  result = vec_sra ((__v4si) __A, rshift);
 
   return (__m128i) result;
 }
@@ -1666,7 +1666,7 @@ _mm_srli_epi16 (__m128i __A, int __B)
       else
 	rshift = vec_splats ((unsigned short) __B);
 
-      result = vec_vsrh ((__v8hi) __A, rshift);
+      result = vec_sr ((__v8hi) __A, rshift);
     }
 
   return (__m128i) result;
@@ -1690,7 +1690,7 @@ _mm_srli_epi32 (__m128i __A, int __B)
      else
	rshift = vec_splats ((unsigned int) __B);
 
-      result = vec_vsrw ((__v4si) __A, rshift);
+      result = vec_sr ((__v4si) __A, rshift);
     }
 
   return (__m128i) result;
@@ -1715,7 +1715,7 @@ _mm_srli_epi64 (__m128i __A, int __B)
       else
 	rshift = (__v2du) vec_splats ((unsigned int) __B);
 
-      result = vec_vsrd ((__v2di) __A, rshift);
+      result = vec_sr ((__v2di) __A, rshift);
     }
 
   return (__m128i) result;
@@ -1735,7 +1735,7 @@ _mm_sll_epi16 (__m128i __A, __m128i __B)
   lshift = vec_splat ((__v8hu)__B, 3);
 #endif
   shmask = lshift <= shmax;
-  result = vec_vslh ((__v8hu) __A, lshift);
+  result = vec_sl ((__v8hu) __A, lshift);
   result = vec_sel (shmask, result, shmask);
 
   return (__m128i) result;
@@ -1753,7 +1753,7 @@ _mm_sll_epi32 (__m128i __A, __m128i __B)
   lshift = vec_splat ((__v4su)__B, 1);
 #endif
   shmask = lshift < shmax;
-  result = vec_vslw ((__v4su) __A, lshift);
+  result = vec_sl ((__v4su) __A, lshift);
   result = vec_sel (shmask, result, shmask);
 
   return (__m128i) result;
@@ -1769,7 +1769,7 @@ _mm_sll_epi64 (__m128i __A, __m128i __B)
   lshift = (__v2du) vec_splat ((__v2du)__B, 0);
 
   shmask = lshift < shmax;
-  result = vec_vsld ((__v2du) __A, lshift);
+  result = vec_sl ((__v2du) __A, lshift);
   result = (__v2du) vec_sel ((__v2df) shmask, (__v2df) result,
 			     (__v2df) shmask);
 
@@ -1790,7 +1790,7 @@ _mm_sra_epi16 (__m128i __A, __m128i __B)
   rshift = vec_splat ((__v8hu)__B, 3);
 #endif
   rshift = vec_min (rshift, rshmax);
-  result = vec_vsrah ((__v8hi) __A, rshift);
+  result = vec_sra ((__v8hi) __A, rshift);
 
   return (__m128i) result;
 }
@@ -1808,7 +1808,7 @@ _mm_sra_epi32 (__m128i __A, __m128i __B)
   rshift = vec_splat ((__v4su)__B, 1);
 #endif
   rshift = vec_min (rshift, rshmax);
-  result = vec_vsraw ((__v4si) __A, rshift);
+  result = vec_sra ((__v4si) __A, rshift);
 
   return (__m128i) result;
 }
@@ -1826,7 +1826,7 @@ _mm_srl_epi16 (__m128i __A, __m128i __B)
   rshift = vec_splat ((__v8hu)__B, 3);
 #endif
   shmask = rshift <= shmax;
-  result = vec_vsrh ((__v8hu) __A, rshift);
+  result = vec_sr ((__v8hu) __A, rshift);
   result = vec_sel (shmask, result, shmask);
 
   return (__m128i) result;
@@ -1845,7 +1845,7 @@ _mm_srl_epi32 (__m128i __A, __m128i __B)
   rshift = vec_splat ((__v4su)__B, 1);
 #endif
   shmask = rshift < shmax;
-  result = vec_vsrw ((__v4su) __A, rshift);
+  result = vec_sr ((__v4su) __A, rshift);
   result = vec_sel (shmask, result, shmask);
 
   return (__m128i) result;
@@ -1861,7 +1861,7 @@ _mm_srl_epi64 (__m128i __A, __m128i __B)
   rshift = (__v2du) vec_splat ((__v2du)__B, 0);
 
   shmask = rshift < shmax;
-  result = vec_vsrd ((__v2du) __A, rshift);
+  result = vec_sr ((__v2du) __A, rshift);
   result = (__v2du)vec_sel ((__v2du)shmask, (__v2du)result, (__v2du)shmask);
 
   return (__m128i) result;
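
If it helps review, here is a small, hypothetical smoke test (not part of the patch) exercising one of the rewritten intrinsics through the emmintrin.h compatibility layer. It assumes a powerpc64le target and something like "gcc -O2 -mvsx -DNO_WARN_X86_INTRINSICS test.c":

  /* Hypothetical smoke test, not part of the patch.  */
  #include <emmintrin.h>
  #include <stdio.h>

  int
  main (void)
  {
    __m128i a = _mm_set1_epi16 (1);
    /* Each halfword shifted left by 3 should become 8.  */
    __m128i b = _mm_slli_epi16 (a, 3);
    /* A shift count >= 16 must zero the result, matching x86 semantics;
       the header handles this outside of the vec_sl call itself.  */
    __m128i c = _mm_slli_epi16 (a, 16);

    int bs = _mm_extract_epi16 (b, 0);
    int cs = _mm_extract_epi16 (c, 0);
    printf ("b[0] = %d (expect 8), c[0] = %d (expect 0)\n", bs, cs);
    return ! (bs == 8 && cs == 0);
  }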