On Thu, 2021-07-29 at 08:30 -0500, Bill Schmidt wrote:
> 2021-06-07  Bill Schmidt  <wschm...@linux.ibm.com>
> 

Hi,

> gcc/
>   * config/rs6000/rs6000-builtin-new.def: Add vsx stanza.
> ---
>  gcc/config/rs6000/rs6000-builtin-new.def | 857 +++++++++++++++++++++++
>  1 file changed, 857 insertions(+)
> 

ok

> diff --git a/gcc/config/rs6000/rs6000-builtin-new.def b/gcc/config/rs6000/rs6000-builtin-new.def
> index f1aa5529cdd..974cdc8c37c 100644
> --- a/gcc/config/rs6000/rs6000-builtin-new.def
> +++ b/gcc/config/rs6000/rs6000-builtin-new.def
> @@ -1028,3 +1028,860 @@
> 
>    const vss __builtin_vec_set_v8hi (vss, signed short, const int<3>);
>      VEC_SET_V8HI nothing {set}
> +
> +
> +; VSX builtins.
> +[vsx]
> +  pure vd __builtin_altivec_lvx_v2df (signed long, const void *);
> +    LVX_V2DF altivec_lvx_v2df {ldvec}
> +
> +  pure vsll __builtin_altivec_lvx_v2di (signed long, const void *);
> +    LVX_V2DI altivec_lvx_v2di {ldvec}
> +
> +  pure vd __builtin_altivec_lvxl_v2df (signed long, const void *);
> +    LVXL_V2DF altivec_lvxl_v2df {ldvec}
> +
> +  pure vsll __builtin_altivec_lvxl_v2di (signed long, const void *);
> +    LVXL_V2DI altivec_lvxl_v2di {ldvec}
> +
> +  const vd __builtin_altivec_nabs_v2df (vd);
> +    NABS_V2DF vsx_nabsv2df2 {}
> +
> +  const vsll __builtin_altivec_nabs_v2di (vsll);
> +    NABS_V2DI nabsv2di2 {}
> +
> +  void __builtin_altivec_stvx_v2df (vd, signed long, void *);
> +    STVX_V2DF altivec_stvx_v2df {stvec}
> +
> +  void __builtin_altivec_stvx_v2di (vsll, signed long, void *);
> +    STVX_V2DI altivec_stvx_v2di {stvec}
> +
> +  void __builtin_altivec_stvxl_v2df (vd, signed long, void *);
> +    STVXL_V2DF altivec_stvxl_v2df {stvec}
> +
> +  void __builtin_altivec_stvxl_v2di (vsll, signed long, void *);
> +    STVXL_V2DI altivec_stvxl_v2di {stvec}
> +
> +  const vd __builtin_altivec_vand_v2df (vd, vd);
> +    VAND_V2DF andv2df3 {}
> +
> +  const vsll __builtin_altivec_vand_v2di (vsll, vsll);
> +    VAND_V2DI andv2di3 {}
> +
> +  const vull __builtin_altivec_vand_v2di_uns (vull, vull);
> +    VAND_V2DI_UNS andv2di3 {}
> +
> +  const vd __builtin_altivec_vandc_v2df (vd, vd);
> +    VANDC_V2DF andcv2df3 {}
> +
> +  const vsll __builtin_altivec_vandc_v2di (vsll, vsll);
> +    VANDC_V2DI andcv2di3 {}
> +
> +  const vull __builtin_altivec_vandc_v2di_uns (vull, vull);
> +    VANDC_V2DI_UNS andcv2di3 {}
> +
> +  const vsll __builtin_altivec_vcmpequd (vull, vull);
> +    VCMPEQUD vector_eqv2di {}
> +
> +  const int __builtin_altivec_vcmpequd_p (int, vsll, vsll);
> +    VCMPEQUD_P vector_eq_v2di_p {pred}
> +
> +  const vsll __builtin_altivec_vcmpgtsd (vsll, vsll);
> +    VCMPGTSD vector_gtv2di {}
> +
> +  const int __builtin_altivec_vcmpgtsd_p (int, vsll, vsll);
> +    VCMPGTSD_P vector_gt_v2di_p {pred}
> +
> +  const vsll __builtin_altivec_vcmpgtud (vull, vull);
> +    VCMPGTUD vector_gtuv2di {}
> +
> +  const int __builtin_altivec_vcmpgtud_p (int, vsll, vsll);
> +    VCMPGTUD_P vector_gtu_v2di_p {pred}
> +
> +  const vd __builtin_altivec_vnor_v2df (vd, vd);
> +    VNOR_V2DF norv2df3 {}
> +
> +  const vsll __builtin_altivec_vnor_v2di (vsll, vsll);
> +    VNOR_V2DI norv2di3 {}
> +
> +  const vull __builtin_altivec_vnor_v2di_uns (vull, vull);
> +    VNOR_V2DI_UNS norv2di3 {}
> +
> +  const vd __builtin_altivec_vor_v2df (vd, vd);
> +    VOR_V2DF iorv2df3 {}
> +
> +  const vsll __builtin_altivec_vor_v2di (vsll, vsll);
> +    VOR_V2DI iorv2di3 {}
> +
> +  const vull __builtin_altivec_vor_v2di_uns (vull, vull);
> +    VOR_V2DI_UNS iorv2di3 {}
> +
> +  const vd __builtin_altivec_vperm_2df (vd, vd, vuc);
> +    VPERM_2DF altivec_vperm_v2df {}
> +
> +  const vsll __builtin_altivec_vperm_2di (vsll, vsll, vuc);
> +    VPERM_2DI altivec_vperm_v2di {}
> +
> +  const vull __builtin_altivec_vperm_2di_uns (vull, vull, vuc);
> +    VPERM_2DI_UNS altivec_vperm_v2di_uns {}
> +
> +  const vd __builtin_altivec_vreve_v2df (vd);
> +    VREVE_V2DF altivec_vrevev2df2 {}
> +
> +  const vsll __builtin_altivec_vreve_v2di (vsll);
> +    VREVE_V2DI altivec_vrevev2di2 {}
> +
> +  const vd __builtin_altivec_vsel_2df (vd, vd, vd);
> +    VSEL_2DF vector_select_v2df {}
> +
> +  const vsll __builtin_altivec_vsel_2di (vsll, vsll, vsll);
> +    VSEL_2DI_B vector_select_v2di {}
> +
> +  const vull __builtin_altivec_vsel_2di_uns (vull, vull, vull);
> +    VSEL_2DI_UNS vector_select_v2di_uns {}
> +
> +  const vd __builtin_altivec_vsldoi_2df (vd, vd, const int<4>);
> +    VSLDOI_2DF altivec_vsldoi_v2df {}
> +
> +  const vsll __builtin_altivec_vsldoi_2di (vsll, vsll, const int<4>);
> +    VSLDOI_2DI altivec_vsldoi_v2di {}
> +
> +  const vd __builtin_altivec_vxor_v2df (vd, vd);
> +    VXOR_V2DF xorv2df3 {}
> +
> +  const vsll __builtin_altivec_vxor_v2di (vsll, vsll);
> +    VXOR_V2DI xorv2di3 {}
> +
> +  const vull __builtin_altivec_vxor_v2di_uns (vull, vull);
> +    VXOR_V2DI_UNS xorv2di3 {}
> +
> +  const signed __int128 __builtin_vec_ext_v1ti (vsq, signed int);
> +    VEC_EXT_V1TI nothing {extract}
> +
> +  const double __builtin_vec_ext_v2df (vd, signed int);
> +    VEC_EXT_V2DF nothing {extract}
> +
> +  const signed long long __builtin_vec_ext_v2di (vsll, signed int);
> +    VEC_EXT_V2DI nothing {extract}
> +
> +  const vsq __builtin_vec_init_v1ti (signed __int128);
> +    VEC_INIT_V1TI nothing {init}
> +
> +  const vd __builtin_vec_init_v2df (double, double);
> +    VEC_INIT_V2DF nothing {init}
> +
> +  const vsll __builtin_vec_init_v2di (signed long long, signed long long);
> +    VEC_INIT_V2DI nothing {init}
> +
> +  const vsq __builtin_vec_set_v1ti (vsq, signed __int128, const int<0,0>);
> +    VEC_SET_V1TI nothing {set}
> +
> +  const vd __builtin_vec_set_v2df (vd, double, const int<1>);
> +    VEC_SET_V2DF nothing {set}
> +
> +  const vsll __builtin_vec_set_v2di (vsll, signed long long, const int<1>);
> +    VEC_SET_V2DI nothing {set}
> +
> +  const vsc __builtin_vsx_cmpge_16qi (vsc, vsc);
> +    CMPGE_16QI vector_nltv16qi {}
> +
> +  const vsll __builtin_vsx_cmpge_2di (vsll, vsll);
> +    CMPGE_2DI vector_nltv2di {}
> +
> +  const vsi __builtin_vsx_cmpge_4si (vsi, vsi);
> +    CMPGE_4SI vector_nltv4si {}
> +
> +  const vss __builtin_vsx_cmpge_8hi (vss, vss);
> +    CMPGE_8HI vector_nltv8hi {}
> +
> +  const vsc __builtin_vsx_cmpge_u16qi (vuc, vuc);
> +    CMPGE_U16QI vector_nltuv16qi {}
> +
> +  const vsll __builtin_vsx_cmpge_u2di (vull, vull);
> +    CMPGE_U2DI vector_nltuv2di {}
> +
> +  const vsi __builtin_vsx_cmpge_u4si (vui, vui);
> +    CMPGE_U4SI vector_nltuv4si {}
> +
> +  const vss __builtin_vsx_cmpge_u8hi (vus, vus);
> +    CMPGE_U8HI vector_nltuv8hi {}
> +
> +  const vsc __builtin_vsx_cmple_16qi (vsc, vsc);
> +    CMPLE_16QI vector_ngtv16qi {}
> +
> +  const vsll __builtin_vsx_cmple_2di (vsll, vsll);
> +    CMPLE_2DI vector_ngtv2di {}
> +
> +  const vsi __builtin_vsx_cmple_4si (vsi, vsi);
> +    CMPLE_4SI vector_ngtv4si {}
> +
> +  const vss __builtin_vsx_cmple_8hi (vss, vss);
> +    CMPLE_8HI vector_ngtv8hi {}
> +
> +  const vsc __builtin_vsx_cmple_u16qi (vsc, vsc);
> +    CMPLE_U16QI vector_ngtuv16qi {}
> +
> +  const vsll __builtin_vsx_cmple_u2di (vsll, vsll);
> +    CMPLE_U2DI vector_ngtuv2di {}
> +
> +  const vsi __builtin_vsx_cmple_u4si (vsi, vsi);
> +    CMPLE_U4SI vector_ngtuv4si {}
> +
> +  const vss __builtin_vsx_cmple_u8hi (vss, vss);
> +    CMPLE_U8HI vector_ngtuv8hi {}
> +
> +  const vd __builtin_vsx_concat_2df (double, double);
> +    CONCAT_2DF vsx_concat_v2df {}
> +
> +  const vsll __builtin_vsx_concat_2di (signed long long, signed long long);
> +    CONCAT_2DI vsx_concat_v2di {}
> +
> +  const vd __builtin_vsx_cpsgndp (vd, vd);
> +    CPSGNDP vector_copysignv2df3 {}
> +
> +  const vf __builtin_vsx_cpsgnsp (vf, vf);
> +    CPSGNSP vector_copysignv4sf3 {}
> +
> +  const vsll __builtin_vsx_div_2di (vsll, vsll);
> +    DIV_V2DI vsx_div_v2di {}
> +
> +  const vd __builtin_vsx_doublee_v4sf (vf);
> +    DOUBLEE_V4SF doubleev4sf2 {}
> +
> +  const vd __builtin_vsx_doublee_v4si (vsi);
> +    DOUBLEE_V4SI doubleev4si2 {}
> +
> +  const vd __builtin_vsx_doubleh_v4sf (vf);
> +    DOUBLEH_V4SF doublehv4sf2 {}
> +
> +  const vd __builtin_vsx_doubleh_v4si (vsi);
> +    DOUBLEH_V4SI doublehv4si2 {}
> +
> +  const vd __builtin_vsx_doublel_v4sf (vf);
> +    DOUBLEL_V4SF doublelv4sf2 {}
> +
> +  const vd __builtin_vsx_doublel_v4si (vsi);
> +    DOUBLEL_V4SI doublelv4si2 {}
> +
> +  const vd __builtin_vsx_doubleo_v4sf (vf);
> +    DOUBLEO_V4SF doubleov4sf2 {}
> +
> +  const vd __builtin_vsx_doubleo_v4si (vsi);
> +    DOUBLEO_V4SI doubleov4si2 {}
> +
> +  const vf __builtin_vsx_floate_v2df (vd);
> +    FLOATE_V2DF floatev2df {}
> +
> +  const vf __builtin_vsx_floate_v2di (vsll);
> +    FLOATE_V2DI floatev2di {}
> +
> +  const vf __builtin_vsx_floato_v2df (vd);
> +    FLOATO_V2DF floatov2df {}
> +
> +  const vf __builtin_vsx_floato_v2di (vsll);
> +    FLOATO_V2DI floatov2di {}
> +
> +  pure vsq __builtin_vsx_ld_elemrev_v1ti (signed long, const void *);
> +    LD_ELEMREV_V1TI vsx_ld_elemrev_v1ti {ldvec,endian}
> +
> +  pure vd __builtin_vsx_ld_elemrev_v2df (signed long, const void *);
> +    LD_ELEMREV_V2DF vsx_ld_elemrev_v2df {ldvec,endian}
> +
> +  pure vsll __builtin_vsx_ld_elemrev_v2di (signed long, const void *);
> +    LD_ELEMREV_V2DI vsx_ld_elemrev_v2di {ldvec,endian}
> +
> +  pure vf __builtin_vsx_ld_elemrev_v4sf (signed long, const void *);
> +    LD_ELEMREV_V4SF vsx_ld_elemrev_v4sf {ldvec,endian}
> +
> +  pure vsi __builtin_vsx_ld_elemrev_v4si (signed long, const void *);
> +    LD_ELEMREV_V4SI vsx_ld_elemrev_v4si {ldvec,endian}
> +
> +  pure vss __builtin_vsx_ld_elemrev_v8hi (signed long, const void *);
> +    LD_ELEMREV_V8HI vsx_ld_elemrev_v8hi {ldvec,endian}
> +
> +  pure vsc __builtin_vsx_ld_elemrev_v16qi (signed long, const void *);
> +    LD_ELEMREV_V16QI vsx_ld_elemrev_v16qi {ldvec,endian}

Seems straightforward; admittedly I have not lined up and confirmed every
argument against its builtin.  (A small untested sketch of one such pairing
follows below.)

> +
> +; There is apparent intent in rs6000-builtin.def to have RS6000_BTC_SPECIAL
> +; processing for LXSDX, LXVDSX, and STXSDX, but there are no def_builtin calls
> +; for any of them. At some point, we may want to add a set of built-ins for
> +; whichever vector types make sense for these.

Add a "TODO:" label?
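For reference, here is the kind of pairing I mean, as a small untested
sketch rather than anything authoritative: it exercises the v2df
element-reversed load/store prototypes exactly as declared above, assuming
a VSX-enabled target.  The function and buffer names are made up for the
example.

  #include <altivec.h>

  /* pure vd __builtin_vsx_ld_elemrev_v2df (signed long, const void *);
     void __builtin_vsx_st_elemrev_v2df (vd, signed long, void *);
     In both cases the scalar operand is the byte offset and the pointer
     is the base address.  */
  static double sketch_buf[2] = { 1.0, 2.0 };

  vector double
  sketch_elemrev_roundtrip (void)
  {
    vector double v = __builtin_vsx_ld_elemrev_v2df (0, sketch_buf);
    __builtin_vsx_st_elemrev_v2df (v, 0, sketch_buf);
    return v;
  }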
> + > + pure vsq __builtin_vsx_lxvd2x_v1ti (signed long, const void *); > + LXVD2X_V1TI vsx_load_v1ti {ldvec} > + > + pure vd __builtin_vsx_lxvd2x_v2df (signed long, const void *); > + LXVD2X_V2DF vsx_load_v2df {ldvec} > + > + pure vsll __builtin_vsx_lxvd2x_v2di (signed long, const void *); > + LXVD2X_V2DI vsx_load_v2di {ldvec} > + > + pure vsc __builtin_vsx_lxvw4x_v16qi (signed long, const void *); > + LXVW4X_V16QI vsx_load_v16qi {ldvec} > + > + pure vf __builtin_vsx_lxvw4x_v4sf (signed long, const void *); > + LXVW4X_V4SF vsx_load_v4sf {ldvec} > + > + pure vsi __builtin_vsx_lxvw4x_v4si (signed long, const void *); > + LXVW4X_V4SI vsx_load_v4si {ldvec} > + > + pure vss __builtin_vsx_lxvw4x_v8hi (signed long, const void *); > + LXVW4X_V8HI vsx_load_v8hi {ldvec} > + > + const vd __builtin_vsx_mergeh_2df (vd, vd); > + VEC_MERGEH_V2DF vsx_mergeh_v2df {} > + > + const vsll __builtin_vsx_mergeh_2di (vsll, vsll); > + VEC_MERGEH_V2DI vsx_mergeh_v2di {} > + > + const vd __builtin_vsx_mergel_2df (vd, vd); > + VEC_MERGEL_V2DF vsx_mergel_v2df {} > + > + const vsll __builtin_vsx_mergel_2di (vsll, vsll); > + VEC_MERGEL_V2DI vsx_mergel_v2di {} > + > + const vsll __builtin_vsx_mul_2di (vsll, vsll); > + MUL_V2DI vsx_mul_v2di {} > + > + const vsq __builtin_vsx_set_1ti (vsq, signed __int128, const int<0,0>); > + SET_1TI vsx_set_v1ti {set} > + > + const vd __builtin_vsx_set_2df (vd, double, const int<0,1>); > + SET_2DF vsx_set_v2df {set} > + > + const vsll __builtin_vsx_set_2di (vsll, signed long long, const int<0,1>); > + SET_2DI vsx_set_v2di {set} > + > + const vd __builtin_vsx_splat_2df (double); > + SPLAT_2DF vsx_splat_v2df {} > + > + const vsll __builtin_vsx_splat_2di (signed long long); > + SPLAT_2DI vsx_splat_v2di {} > + > + void __builtin_vsx_st_elemrev_v1ti (vsq, signed long, void *); > + ST_ELEMREV_V1TI vsx_st_elemrev_v1ti {stvec,endian} > + > + void __builtin_vsx_st_elemrev_v2df (vd, signed long, void *); > + ST_ELEMREV_V2DF vsx_st_elemrev_v2df {stvec,endian} > + > + void __builtin_vsx_st_elemrev_v2di (vsll, signed long, void *); > + ST_ELEMREV_V2DI vsx_st_elemrev_v2di {stvec,endian} > + > + void __builtin_vsx_st_elemrev_v4sf (vf, signed long, void *); > + ST_ELEMREV_V4SF vsx_st_elemrev_v4sf {stvec,endian} > + > + void __builtin_vsx_st_elemrev_v4si (vsi, signed long, void *); > + ST_ELEMREV_V4SI vsx_st_elemrev_v4si {stvec,endian} > + > + void __builtin_vsx_st_elemrev_v8hi (vss, signed long, void *); > + ST_ELEMREV_V8HI vsx_st_elemrev_v8hi {stvec,endian} > + > + void __builtin_vsx_st_elemrev_v16qi (vsc, signed long, void *); > + ST_ELEMREV_V16QI vsx_st_elemrev_v16qi {stvec,endian} > + > + void __builtin_vsx_stxvd2x_v1ti (vsq, signed long, void *); > + STXVD2X_V1TI vsx_store_v1ti {stvec} > + > + void __builtin_vsx_stxvd2x_v2df (vd, signed long, void *); > + STXVD2X_V2DF vsx_store_v2df {stvec} > + > + void __builtin_vsx_stxvd2x_v2di (vsll, signed long, void *); > + STXVD2X_V2DI vsx_store_v2di {stvec} > + > + void __builtin_vsx_stxvw4x_v4sf (vf, signed long, void *); > + STXVW4X_V4SF vsx_store_v4sf {stvec} > + > + void __builtin_vsx_stxvw4x_v4si (vsi, signed long, void *); > + STXVW4X_V4SI vsx_store_v4si {stvec} > + > + void __builtin_vsx_stxvw4x_v8hi (vss, signed long, void *); > + STXVW4X_V8HI vsx_store_v8hi {stvec} > + > + void __builtin_vsx_stxvw4x_v16qi (vsc, signed long, void *); > + STXVW4X_V16QI vsx_store_v16qi {stvec} > + > + const vull __builtin_vsx_udiv_2di (vull, vull); > + UDIV_V2DI vsx_udiv_v2di {} > + > + const vd __builtin_vsx_uns_doublee_v4si (vsi); > + UNS_DOUBLEE_V4SI 
unsdoubleev4si2 {}
> +
> +  const vd __builtin_vsx_uns_doubleh_v4si (vsi);
> +    UNS_DOUBLEH_V4SI unsdoublehv4si2 {}
> +
> +  const vd __builtin_vsx_uns_doublel_v4si (vsi);
> +    UNS_DOUBLEL_V4SI unsdoublelv4si2 {}
> +
> +  const vd __builtin_vsx_uns_doubleo_v4si (vsi);
> +    UNS_DOUBLEO_V4SI unsdoubleov4si2 {}
> +
> +  const vf __builtin_vsx_uns_floate_v2di (vsll);
> +    UNS_FLOATE_V2DI unsfloatev2di {}
> +
> +  const vf __builtin_vsx_uns_floato_v2di (vsll);
> +    UNS_FLOATO_V2DI unsfloatov2di {}
> +
> +; I have no idea why we have __builtin_vsx_* duplicates of these when
> +; the __builtin_altivec_* counterparts are already present. Keeping
> +; them for compatibility, but...oy.

Oy indeed.  Perhaps add a straightforward statement such as "These are
duplicates of the __builtin_altivec_* built-ins, kept here for backwards
compatibility."  Perhaps also another TODO: label so they can be deprecated
later if desired?  (A tiny illustration of the duplication is tacked on
after the end of the quoted patch.)

No further comments.

Thanks,
-Will

> +  const vsc __builtin_vsx_vperm_16qi (vsc, vsc, vuc);
> +    VPERM_16QI_X altivec_vperm_v16qi {}
> +
> +  const vuc __builtin_vsx_vperm_16qi_uns (vuc, vuc, vuc);
> +    VPERM_16QI_UNS_X altivec_vperm_v16qi_uns {}
> +
> +  const vsq __builtin_vsx_vperm_1ti (vsq, vsq, vsc);
> +    VPERM_1TI_X altivec_vperm_v1ti {}
> +
> +  const vsq __builtin_vsx_vperm_1ti_uns (vsq, vsq, vsc);
> +    VPERM_1TI_UNS_X altivec_vperm_v1ti_uns {}
> +
> +  const vd __builtin_vsx_vperm_2df (vd, vd, vuc);
> +    VPERM_2DF_X altivec_vperm_v2df {}
> +
> +  const vsll __builtin_vsx_vperm_2di (vsll, vsll, vuc);
> +    VPERM_2DI_X altivec_vperm_v2di {}
> +
> +  const vull __builtin_vsx_vperm_2di_uns (vull, vull, vuc);
> +    VPERM_2DI_UNS_X altivec_vperm_v2di_uns {}
> +
> +  const vf __builtin_vsx_vperm_4sf (vf, vf, vuc);
> +    VPERM_4SF_X altivec_vperm_v4sf {}
> +
> +  const vsi __builtin_vsx_vperm_4si (vsi, vsi, vuc);
> +    VPERM_4SI_X altivec_vperm_v4si {}
> +
> +  const vui __builtin_vsx_vperm_4si_uns (vui, vui, vuc);
> +    VPERM_4SI_UNS_X altivec_vperm_v4si_uns {}
> +
> +  const vss __builtin_vsx_vperm_8hi (vss, vss, vuc);
> +    VPERM_8HI_X altivec_vperm_v8hi {}
> +
> +  const vus __builtin_vsx_vperm_8hi_uns (vus, vus, vuc);
> +    VPERM_8HI_UNS_X altivec_vperm_v8hi_uns {}
> +
> +  const vsll __builtin_vsx_vsigned_v2df (vd);
> +    VEC_VSIGNED_V2DF vsx_xvcvdpsxds {}
> +
> +  const vsi __builtin_vsx_vsigned_v4sf (vf);
> +    VEC_VSIGNED_V4SF vsx_xvcvspsxws {}
> +
> +  const vsi __builtin_vsx_vsignede_v2df (vd);
> +    VEC_VSIGNEDE_V2DF vsignede_v2df {}
> +
> +  const vsi __builtin_vsx_vsignedo_v2df (vd);
> +    VEC_VSIGNEDO_V2DF vsignedo_v2df {}
> +
> +  const vsll __builtin_vsx_vunsigned_v2df (vd);
> +    VEC_VUNSIGNED_V2DF vsx_xvcvdpsxds {}
> +
> +  const vsi __builtin_vsx_vunsigned_v4sf (vf);
> +    VEC_VUNSIGNED_V4SF vsx_xvcvspsxws {}
> +
> +  const vsi __builtin_vsx_vunsignede_v2df (vd);
> +    VEC_VUNSIGNEDE_V2DF vunsignede_v2df {}
> +
> +  const vsi __builtin_vsx_vunsignedo_v2df (vd);
> +    VEC_VUNSIGNEDO_V2DF vunsignedo_v2df {}
> +
> +  const vf __builtin_vsx_xscvdpsp (double);
> +    XSCVDPSP vsx_xscvdpsp {}
> +
> +  const double __builtin_vsx_xscvspdp (vf);
> +    XSCVSPDP vsx_xscvspdp {}
> +
> +  const double __builtin_vsx_xsmaxdp (double, double);
> +    XSMAXDP smaxdf3 {}
> +
> +  const double __builtin_vsx_xsmindp (double, double);
> +    XSMINDP smindf3 {}
> +
> +  const double __builtin_vsx_xsrdpi (double);
> +    XSRDPI vsx_xsrdpi {}
> +
> +  const double __builtin_vsx_xsrdpic (double);
> +    XSRDPIC vsx_xsrdpic {}
> +
> +  const double __builtin_vsx_xsrdpim (double);
> +    XSRDPIM floordf2 {}
> +
> +  const double __builtin_vsx_xsrdpip (double);
> +    XSRDPIP ceildf2 {}
> +
const double __builtin_vsx_xsrdpiz (double); > + XSRDPIZ btruncdf2 {} > + > + const signed int __builtin_vsx_xstdivdp_fe (double, double); > + XSTDIVDP_FE vsx_tdivdf3_fe {} > + > + const signed int __builtin_vsx_xstdivdp_fg (double, double); > + XSTDIVDP_FG vsx_tdivdf3_fg {} > + > + const signed int __builtin_vsx_xstsqrtdp_fe (double); > + XSTSQRTDP_FE vsx_tsqrtdf2_fe {} > + > + const signed int __builtin_vsx_xstsqrtdp_fg (double); > + XSTSQRTDP_FG vsx_tsqrtdf2_fg {} > + > + const vd __builtin_vsx_xvabsdp (vd); > + XVABSDP absv2df2 {} > + > + const vf __builtin_vsx_xvabssp (vf); > + XVABSSP absv4sf2 {} > + > + fpmath vd __builtin_vsx_xvadddp (vd, vd); > + XVADDDP addv2df3 {} > + > + fpmath vf __builtin_vsx_xvaddsp (vf, vf); > + XVADDSP addv4sf3 {} > + > + const vd __builtin_vsx_xvcmpeqdp (vd, vd); > + XVCMPEQDP vector_eqv2df {} > + > + const signed int __builtin_vsx_xvcmpeqdp_p (signed int, vd, vd); > + XVCMPEQDP_P vector_eq_v2df_p {pred} > + > + const vf __builtin_vsx_xvcmpeqsp (vf, vf); > + XVCMPEQSP vector_eqv4sf {} > + > + const signed int __builtin_vsx_xvcmpeqsp_p (signed int, vf, vf); > + XVCMPEQSP_P vector_eq_v4sf_p {pred} > + > + const vd __builtin_vsx_xvcmpgedp (vd, vd); > + XVCMPGEDP vector_gev2df {} > + > + const signed int __builtin_vsx_xvcmpgedp_p (signed int, vd, vd); > + XVCMPGEDP_P vector_ge_v2df_p {pred} > + > + const vf __builtin_vsx_xvcmpgesp (vf, vf); > + XVCMPGESP vector_gev4sf {} > + > + const signed int __builtin_vsx_xvcmpgesp_p (signed int, vf, vf); > + XVCMPGESP_P vector_ge_v4sf_p {pred} > + > + const vd __builtin_vsx_xvcmpgtdp (vd, vd); > + XVCMPGTDP vector_gtv2df {} > + > + const signed int __builtin_vsx_xvcmpgtdp_p (signed int, vd, vd); > + XVCMPGTDP_P vector_gt_v2df_p {pred} > + > + const vf __builtin_vsx_xvcmpgtsp (vf, vf); > + XVCMPGTSP vector_gtv4sf {} > + > + const signed int __builtin_vsx_xvcmpgtsp_p (signed int, vf, vf); > + XVCMPGTSP_P vector_gt_v4sf_p {pred} > + > + const vf __builtin_vsx_xvcvdpsp (vd); > + XVCVDPSP vsx_xvcvdpsp {} > + > + const vsll __builtin_vsx_xvcvdpsxds (vd); > + XVCVDPSXDS vsx_fix_truncv2dfv2di2 {} > + > + const vsll __builtin_vsx_xvcvdpsxds_scale (vd, const int); > + XVCVDPSXDS_SCALE vsx_xvcvdpsxds_scale {} > + > + const vsi __builtin_vsx_xvcvdpsxws (vd); > + XVCVDPSXWS vsx_xvcvdpsxws {} > + > + const vsll __builtin_vsx_xvcvdpuxds (vd); > + XVCVDPUXDS vsx_fixuns_truncv2dfv2di2 {} > + > + const vsll __builtin_vsx_xvcvdpuxds_scale (vd, const int); > + XVCVDPUXDS_SCALE vsx_xvcvdpuxds_scale {} > + > + const vull __builtin_vsx_xvcvdpuxds_uns (vd); > + XVCVDPUXDS_UNS vsx_fixuns_truncv2dfv2di2 {} > + > + const vsi __builtin_vsx_xvcvdpuxws (vd); > + XVCVDPUXWS vsx_xvcvdpuxws {} > + > + const vd __builtin_vsx_xvcvspdp (vf); > + XVCVSPDP vsx_xvcvspdp {} > + > + const vsll __builtin_vsx_xvcvspsxds (vf); > + XVCVSPSXDS vsx_xvcvspsxds {} > + > + const vsi __builtin_vsx_xvcvspsxws (vf); > + XVCVSPSXWS vsx_fix_truncv4sfv4si2 {} > + > + const vsll __builtin_vsx_xvcvspuxds (vf); > + XVCVSPUXDS vsx_xvcvspuxds {} > + > + const vsi __builtin_vsx_xvcvspuxws (vf); > + XVCVSPUXWS vsx_fixuns_truncv4sfv4si2 {} > + > + const vd __builtin_vsx_xvcvsxddp (vsll); > + XVCVSXDDP vsx_floatv2div2df2 {} > + > + const vd __builtin_vsx_xvcvsxddp_scale (vsll, const int<5>); > + XVCVSXDDP_SCALE vsx_xvcvsxddp_scale {} > + > + const vf __builtin_vsx_xvcvsxdsp (vsll); > + XVCVSXDSP vsx_xvcvsxdsp {} > + > + const vd __builtin_vsx_xvcvsxwdp (vsi); > + XVCVSXWDP vsx_xvcvsxwdp {} > + > + const vf __builtin_vsx_xvcvsxwsp (vsi); > + XVCVSXWSP vsx_floatv4siv4sf2 {} > + > + 
const vd __builtin_vsx_xvcvuxddp (vsll); > + XVCVUXDDP vsx_floatunsv2div2df2 {} > + > + const vd __builtin_vsx_xvcvuxddp_scale (vsll, const int<5>); > + XVCVUXDDP_SCALE vsx_xvcvuxddp_scale {} > + > + const vd __builtin_vsx_xvcvuxddp_uns (vull); > + XVCVUXDDP_UNS vsx_floatunsv2div2df2 {} > + > + const vf __builtin_vsx_xvcvuxdsp (vull); > + XVCVUXDSP vsx_xvcvuxdsp {} > + > + const vd __builtin_vsx_xvcvuxwdp (vsi); > + XVCVUXWDP vsx_xvcvuxwdp {} > + > + const vf __builtin_vsx_xvcvuxwsp (vsi); > + XVCVUXWSP vsx_floatunsv4siv4sf2 {} > + > + fpmath vd __builtin_vsx_xvdivdp (vd, vd); > + XVDIVDP divv2df3 {} > + > + fpmath vf __builtin_vsx_xvdivsp (vf, vf); > + XVDIVSP divv4sf3 {} > + > + const vd __builtin_vsx_xvmadddp (vd, vd, vd); > + XVMADDDP fmav2df4 {} > + > + const vf __builtin_vsx_xvmaddsp (vf, vf, vf); > + XVMADDSP fmav4sf4 {} > + > + const vd __builtin_vsx_xvmaxdp (vd, vd); > + XVMAXDP smaxv2df3 {} > + > + const vf __builtin_vsx_xvmaxsp (vf, vf); > + XVMAXSP smaxv4sf3 {} > + > + const vd __builtin_vsx_xvmindp (vd, vd); > + XVMINDP sminv2df3 {} > + > + const vf __builtin_vsx_xvminsp (vf, vf); > + XVMINSP sminv4sf3 {} > + > + const vd __builtin_vsx_xvmsubdp (vd, vd, vd); > + XVMSUBDP fmsv2df4 {} > + > + const vf __builtin_vsx_xvmsubsp (vf, vf, vf); > + XVMSUBSP fmsv4sf4 {} > + > + fpmath vd __builtin_vsx_xvmuldp (vd, vd); > + XVMULDP mulv2df3 {} > + > + fpmath vf __builtin_vsx_xvmulsp (vf, vf); > + XVMULSP mulv4sf3 {} > + > + const vd __builtin_vsx_xvnabsdp (vd); > + XVNABSDP vsx_nabsv2df2 {} > + > + const vf __builtin_vsx_xvnabssp (vf); > + XVNABSSP vsx_nabsv4sf2 {} > + > + const vd __builtin_vsx_xvnegdp (vd); > + XVNEGDP negv2df2 {} > + > + const vf __builtin_vsx_xvnegsp (vf); > + XVNEGSP negv4sf2 {} > + > + const vd __builtin_vsx_xvnmadddp (vd, vd, vd); > + XVNMADDDP nfmav2df4 {} > + > + const vf __builtin_vsx_xvnmaddsp (vf, vf, vf); > + XVNMADDSP nfmav4sf4 {} > + > + const vd __builtin_vsx_xvnmsubdp (vd, vd, vd); > + XVNMSUBDP nfmsv2df4 {} > + > + const vf __builtin_vsx_xvnmsubsp (vf, vf, vf); > + XVNMSUBSP nfmsv4sf4 {} > + > + const vd __builtin_vsx_xvrdpi (vd); > + XVRDPI vsx_xvrdpi {} > + > + const vd __builtin_vsx_xvrdpic (vd); > + XVRDPIC vsx_xvrdpic {} > + > + const vd __builtin_vsx_xvrdpim (vd); > + XVRDPIM vsx_floorv2df2 {} > + > + const vd __builtin_vsx_xvrdpip (vd); > + XVRDPIP vsx_ceilv2df2 {} > + > + const vd __builtin_vsx_xvrdpiz (vd); > + XVRDPIZ vsx_btruncv2df2 {} > + > + fpmath vd __builtin_vsx_xvrecipdivdp (vd, vd); > + RECIP_V2DF recipv2df3 {} > + > + fpmath vf __builtin_vsx_xvrecipdivsp (vf, vf); > + RECIP_V4SF recipv4sf3 {} > + > + const vd __builtin_vsx_xvredp (vd); > + XVREDP vsx_frev2df2 {} > + > + const vf __builtin_vsx_xvresp (vf); > + XVRESP vsx_frev4sf2 {} > + > + const vf __builtin_vsx_xvrspi (vf); > + XVRSPI vsx_xvrspi {} > + > + const vf __builtin_vsx_xvrspic (vf); > + XVRSPIC vsx_xvrspic {} > + > + const vf __builtin_vsx_xvrspim (vf); > + XVRSPIM vsx_floorv4sf2 {} > + > + const vf __builtin_vsx_xvrspip (vf); > + XVRSPIP vsx_ceilv4sf2 {} > + > + const vf __builtin_vsx_xvrspiz (vf); > + XVRSPIZ vsx_btruncv4sf2 {} > + > + const vd __builtin_vsx_xvrsqrtdp (vd); > + RSQRT_2DF rsqrtv2df2 {} > + > + const vf __builtin_vsx_xvrsqrtsp (vf); > + RSQRT_4SF rsqrtv4sf2 {} > + > + const vd __builtin_vsx_xvrsqrtedp (vd); > + XVRSQRTEDP rsqrtev2df2 {} > + > + const vf __builtin_vsx_xvrsqrtesp (vf); > + XVRSQRTESP rsqrtev4sf2 {} > + > + const vd __builtin_vsx_xvsqrtdp (vd); > + XVSQRTDP sqrtv2df2 {} > + > + const vf __builtin_vsx_xvsqrtsp (vf); > + XVSQRTSP sqrtv4sf2 {} > 
+ > + fpmath vd __builtin_vsx_xvsubdp (vd, vd); > + XVSUBDP subv2df3 {} > + > + fpmath vf __builtin_vsx_xvsubsp (vf, vf); > + XVSUBSP subv4sf3 {} > + > + const signed int __builtin_vsx_xvtdivdp_fe (vd, vd); > + XVTDIVDP_FE vsx_tdivv2df3_fe {} > + > + const signed int __builtin_vsx_xvtdivdp_fg (vd, vd); > + XVTDIVDP_FG vsx_tdivv2df3_fg {} > + > + const signed int __builtin_vsx_xvtdivsp_fe (vf, vf); > + XVTDIVSP_FE vsx_tdivv4sf3_fe {} > + > + const signed int __builtin_vsx_xvtdivsp_fg (vf, vf); > + XVTDIVSP_FG vsx_tdivv4sf3_fg {} > + > + const signed int __builtin_vsx_xvtsqrtdp_fe (vd); > + XVTSQRTDP_FE vsx_tsqrtv2df2_fe {} > + > + const signed int __builtin_vsx_xvtsqrtdp_fg (vd); > + XVTSQRTDP_FG vsx_tsqrtv2df2_fg {} > + > + const signed int __builtin_vsx_xvtsqrtsp_fe (vf); > + XVTSQRTSP_FE vsx_tsqrtv4sf2_fe {} > + > + const signed int __builtin_vsx_xvtsqrtsp_fg (vf); > + XVTSQRTSP_FG vsx_tsqrtv4sf2_fg {} > + > + const vf __builtin_vsx_xxmrghw (vf, vf); > + XXMRGHW_4SF vsx_xxmrghw_v4sf {} > + > + const vsi __builtin_vsx_xxmrghw_4si (vsi, vsi); > + XXMRGHW_4SI vsx_xxmrghw_v4si {} > + > + const vf __builtin_vsx_xxmrglw (vf, vf); > + XXMRGLW_4SF vsx_xxmrglw_v4sf {} > + > + const vsi __builtin_vsx_xxmrglw_4si (vsi, vsi); > + XXMRGLW_4SI vsx_xxmrglw_v4si {} > + > + const vsc __builtin_vsx_xxpermdi_16qi (vsc, vsc, const int<2>); > + XXPERMDI_16QI vsx_xxpermdi_v16qi {} > + > + const vsq __builtin_vsx_xxpermdi_1ti (vsq, vsq, const int<2>); > + XXPERMDI_1TI vsx_xxpermdi_v1ti {} > + > + const vd __builtin_vsx_xxpermdi_2df (vd, vd, const int<2>); > + XXPERMDI_2DF vsx_xxpermdi_v2df {} > + > + const vsll __builtin_vsx_xxpermdi_2di (vsll, vsll, const int<2>); > + XXPERMDI_2DI vsx_xxpermdi_v2di {} > + > + const vf __builtin_vsx_xxpermdi_4sf (vf, vf, const int<2>); > + XXPERMDI_4SF vsx_xxpermdi_v4sf {} > + > + const vsi __builtin_vsx_xxpermdi_4si (vsi, vsi, const int<2>); > + XXPERMDI_4SI vsx_xxpermdi_v4si {} > + > + const vss __builtin_vsx_xxpermdi_8hi (vss, vss, const int<2>); > + XXPERMDI_8HI vsx_xxpermdi_v8hi {} > + > + const vsc __builtin_vsx_xxsel_16qi (vsc, vsc, vsc); > + XXSEL_16QI vector_select_v16qi {} > + > + const vuc __builtin_vsx_xxsel_16qi_uns (vuc, vuc, vuc); > + XXSEL_16QI_UNS vector_select_v16qi_uns {} > + > + const vsq __builtin_vsx_xxsel_1ti (vsq, vsq, vsq); > + XXSEL_1TI vector_select_v1ti {} > + > + const vsq __builtin_vsx_xxsel_1ti_uns (vsq, vsq, vsq); > + XXSEL_1TI_UNS vector_select_v1ti_uns {} > + > + const vd __builtin_vsx_xxsel_2df (vd, vd, vd); > + XXSEL_2DF vector_select_v2df {} > + > + const vsll __builtin_vsx_xxsel_2di (vsll, vsll, vsll); > + XXSEL_2DI vector_select_v2di {} > + > + const vull __builtin_vsx_xxsel_2di_uns (vull, vull, vull); > + XXSEL_2DI_UNS vector_select_v2di_uns {} > + > + const vf __builtin_vsx_xxsel_4sf (vf, vf, vf); > + XXSEL_4SF vector_select_v4sf {} > + > + const vsi __builtin_vsx_xxsel_4si (vsi, vsi, vsi); > + XXSEL_4SI vector_select_v4si {} > + > + const vui __builtin_vsx_xxsel_4si_uns (vui, vui, vui); > + XXSEL_4SI_UNS vector_select_v4si_uns {} > + > + const vss __builtin_vsx_xxsel_8hi (vss, vss, vss); > + XXSEL_8HI vector_select_v8hi {} > + > + const vus __builtin_vsx_xxsel_8hi_uns (vus, vus, vus); > + XXSEL_8HI_UNS vector_select_v8hi_uns {} > + > + const vsc __builtin_vsx_xxsldwi_16qi (vsc, vsc, const int<2>); > + XXSLDWI_16QI vsx_xxsldwi_v16qi {} > + > + const vd __builtin_vsx_xxsldwi_2df (vd, vd, const int<2>); > + XXSLDWI_2DF vsx_xxsldwi_v2df {} > + > + const vsll __builtin_vsx_xxsldwi_2di (vsll, vsll, const int<2>); > + XXSLDWI_2DI 
vsx_xxsldwi_v2di {}
> +
> +  const vf __builtin_vsx_xxsldwi_4sf (vf, vf, const int<2>);
> +    XXSLDWI_4SF vsx_xxsldwi_v4sf {}
> +
> +  const vsi __builtin_vsx_xxsldwi_4si (vsi, vsi, const int<2>);
> +    XXSLDWI_4SI vsx_xxsldwi_v4si {}
> +
> +  const vss __builtin_vsx_xxsldwi_8hi (vss, vss, const int<2>);
> +    XXSLDWI_8HI vsx_xxsldwi_v8hi {}
> +
> +  const vd __builtin_vsx_xxspltd_2df (vd, const int<1>);
> +    XXSPLTD_V2DF vsx_xxspltd_v2df {}
> +
> +  const vsll __builtin_vsx_xxspltd_2di (vsll, const int<1>);
> +    XXSPLTD_V2DI vsx_xxspltd_v2di {}
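P.S. Here is the tiny illustration of the __builtin_vsx_* versus
__builtin_altivec_* duplication promised above, again an untested sketch
with made-up names rather than anything definitive.  Per the quoted
entries, VPERM_2DF and VPERM_2DF_X both expand through the same
altivec_vperm_v2df pattern, so the two calls below should be
interchangeable (assuming -mvsx):

  #include <altivec.h>

  vector double
  sketch_duplicate_perm (vector double a, vector double b,
                         vector unsigned char pcv)
  {
    /* const vd __builtin_altivec_vperm_2df (vd, vd, vuc);  VPERM_2DF  */
    vector double x = __builtin_altivec_vperm_2df (a, b, pcv);
    /* const vd __builtin_vsx_vperm_2df (vd, vd, vuc);  VPERM_2DF_X  */
    vector double y = __builtin_vsx_vperm_2df (a, b, pcv);
    return vec_add (x, y);
  }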