amyk created this revision. amyk added reviewers: power-llvm-team, PowerPC, nemanjai, saghir, Conanap. amyk added projects: LLVM, clang, PowerPC. Herald added subscribers: shchenz, hiraditya. amyk requested review of this revision.
This patch implements the vec_[all|any]_[eq|ne|lt|gt|le|ge] builtins for vector signed/unsigned __int128. Depends on D87804 <https://reviews.llvm.org/D87804> Repository: rG LLVM Github Monorepo https://reviews.llvm.org/D87910 Files: clang/include/clang/Basic/BuiltinsPPC.def clang/lib/CodeGen/CGExprScalar.cpp clang/lib/Headers/altivec.h clang/test/CodeGen/builtins-ppc-p10vector.c llvm/include/llvm/IR/IntrinsicsPowerPC.td llvm/lib/Target/PowerPC/PPCISelLowering.cpp llvm/test/CodeGen/PowerPC/vec_cmpq.ll
Index: llvm/test/CodeGen/PowerPC/vec_cmpq.ll =================================================================== --- llvm/test/CodeGen/PowerPC/vec_cmpq.ll +++ llvm/test/CodeGen/PowerPC/vec_cmpq.ll @@ -231,3 +231,31 @@ ; CHECK-LABEL: test_vcmpgtuq ; CHECK: vcmpgtuq {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} } + +declare i32 @llvm.ppc.altivec.vcmpequq.p(i32, <1 x i128>, <1 x i128>) nounwind readnone +declare i32 @llvm.ppc.altivec.vcmpgtsq.p(i32, <1 x i128>, <1 x i128>) nounwind readnone +declare i32 @llvm.ppc.altivec.vcmpgtuq.p(i32, <1 x i128>, <1 x i128>) nounwind readnone + +define i32 @test_vcmpequq_p(<1 x i128> %x, <1 x i128> %y) { + %tmp = tail call i32 @llvm.ppc.altivec.vcmpequq.p(i32 2, <1 x i128> %x, <1 x i128> %y) + ret i32 %tmp +; CHECK-LABEL: test_vcmpequq_p: +; CHECK: vcmpequq. {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: blr +} + +define i32 @test_vcmpgtsq_p(<1 x i128> %x, <1 x i128> %y) { + %tmp = tail call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 2, <1 x i128> %x, <1 x i128> %y) + ret i32 %tmp +; CHECK-LABEL: test_vcmpgtsq_p +; CHECK: vcmpgtsq. {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: blr +} + +define i32 @test_vcmpgtuq_p(<1 x i128> %x, <1 x i128> %y) { + %tmp = tail call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 2, <1 x i128> %x, <1 x i128> %y) + ret i32 %tmp +; CHECK-LABEL: test_vcmpgtuq_p +; CHECK: vcmpgtuq. 
{{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}} +; CHECK: blr +} Index: llvm/lib/Target/PowerPC/PPCISelLowering.cpp =================================================================== --- llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -10323,6 +10323,26 @@ break; } break; + case Intrinsic::ppc_altivec_vcmpequq_p: + case Intrinsic::ppc_altivec_vcmpgtsq_p: + case Intrinsic::ppc_altivec_vcmpgtuq_p: + if (!Subtarget.isISA3_1()) + return false; + switch (IntrinsicID) { + default: + llvm_unreachable("Unknown comparison intrinsic."); + case Intrinsic::ppc_altivec_vcmpequq_p: + CompareOpc = 455; + break; + case Intrinsic::ppc_altivec_vcmpgtsq_p: + CompareOpc = 903; + break; + case Intrinsic::ppc_altivec_vcmpgtuq_p: + CompareOpc = 647; + break; + } + isDot = true; + break; } return true; } @@ -15189,16 +15209,19 @@ case Intrinsic::ppc_altivec_vcmpequh_p: case Intrinsic::ppc_altivec_vcmpequw_p: case Intrinsic::ppc_altivec_vcmpequd_p: + case Intrinsic::ppc_altivec_vcmpequq_p: case Intrinsic::ppc_altivec_vcmpgefp_p: case Intrinsic::ppc_altivec_vcmpgtfp_p: case Intrinsic::ppc_altivec_vcmpgtsb_p: case Intrinsic::ppc_altivec_vcmpgtsh_p: case Intrinsic::ppc_altivec_vcmpgtsw_p: case Intrinsic::ppc_altivec_vcmpgtsd_p: + case Intrinsic::ppc_altivec_vcmpgtsq_p: case Intrinsic::ppc_altivec_vcmpgtub_p: case Intrinsic::ppc_altivec_vcmpgtuh_p: case Intrinsic::ppc_altivec_vcmpgtuw_p: case Intrinsic::ppc_altivec_vcmpgtud_p: + case Intrinsic::ppc_altivec_vcmpgtuq_p: Known.Zero = ~1U; // All bits but the low one are known to be zero. 
break; } Index: llvm/include/llvm/IR/IntrinsicsPowerPC.td =================================================================== --- llvm/include/llvm/IR/IntrinsicsPowerPC.td +++ llvm/include/llvm/IR/IntrinsicsPowerPC.td @@ -370,6 +370,18 @@ def int_ppc_altivec_vcmpgtuq : GCCBuiltin<"__builtin_altivec_vcmpgtuq">, Intrinsic<[llvm_v1i128_ty], [llvm_v1i128_ty, llvm_v1i128_ty], [IntrNoMem]>; + def int_ppc_altivec_vcmpequq_p : GCCBuiltin<"__builtin_altivec_vcmpequq_p">, + Intrinsic<[llvm_i32_ty], + [llvm_i32_ty,llvm_v1i128_ty,llvm_v1i128_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtsq_p : GCCBuiltin<"__builtin_altivec_vcmpgtsq_p">, + Intrinsic<[llvm_i32_ty], + [llvm_i32_ty,llvm_v1i128_ty,llvm_v1i128_ty], + [IntrNoMem]>; + def int_ppc_altivec_vcmpgtuq_p : GCCBuiltin<"__builtin_altivec_vcmpgtuq_p">, + Intrinsic<[llvm_i32_ty], + [llvm_i32_ty,llvm_v1i128_ty,llvm_v1i128_ty], + [IntrNoMem]>; // Predicate Comparisons. The first operand specifies interpretation of CR6. def int_ppc_altivec_vcmpbfp_p : GCCBuiltin<"__builtin_altivec_vcmpbfp_p">, Index: clang/test/CodeGen/builtins-ppc-p10vector.c =================================================================== --- clang/test/CodeGen/builtins-ppc-p10vector.c +++ clang/test/CodeGen/builtins-ppc-p10vector.c @@ -1158,3 +1158,171 @@ // CHECK-NEXT: ret <1 x i128> return vec_cmple(vui128a, vui128b); } + +int test_vec_any_eq_u128(void) { + // CHECK-LABEL: @test_vec_any_eq_u128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 1, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_any_eq(vui128a, vui128b); +} + +int test_vec_any_eq_s128(void) { + // CHECK-LABEL: @test_vec_any_eq_s128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 1, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_any_eq(vsi128a, vsi128b); +} + +int test_vec_any_ne_s128(void) { + // CHECK-LABEL: @test_vec_any_ne_s128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 3, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: 
ret i32 + return vec_any_ne(vsi128a, vsi128b); +} + +int test_vec_any_ne_u128(void) { + // CHECK-LABEL: @test_vec_any_ne_u128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 3, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_any_ne(vui128a, vui128b); +} + +int test_vec_any_lt_s128(void) { + // CHECK-LABEL: @test_vec_any_lt_s128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 1, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_any_lt(vsi128a, vsi128b); +} + +int test_vec_any_lt_u128(void) { + // CHECK-LABEL: @test_vec_any_lt_u128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 1, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_any_lt(vui128a, vui128b); +} + +int test_vec_any_gt_s128(void) { + // CHECK-LABEL: @test_vec_any_gt_s128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 1, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_any_gt(vsi128a, vsi128b); +} + +int test_vec_any_gt_u128(void) { + // CHECK-LABEL: @test_vec_any_gt_u128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 1, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_any_gt(vui128a, vui128b); +} + +int test_vec_any_le_s128(void) { + // CHECK-LABEL: @test_vec_any_le_s128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 3, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_any_le(vsi128a, vsi128b); +} + +int test_vec_any_le_u128(void) { + // CHECK-LABEL: @test_vec_any_le_u128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 3, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_any_le(vui128a, vui128b); +} + +int test_vec_any_ge_s128(void) { + // CHECK-LABEL: @test_vec_any_ge_s128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 3, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_any_ge(vsi128a, vsi128b); +} + +int test_vec_any_ge_u128(void) { + // CHECK-LABEL: @test_vec_any_ge_u128( + // CHECK: call i32 
@llvm.ppc.altivec.vcmpgtuq.p(i32 3, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_any_ge(vui128a, vui128b); +} + +int test_vec_all_eq_s128(void) { + // CHECK-LABEL: @test_vec_all_eq_s128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 2, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_all_eq(vsi128a, vsi128b); +} + +int test_vec_all_eq_u128(void) { + // CHECK-LABEL: @test_vec_all_eq_u128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 2, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_all_eq(vui128a, vui128b); +} + +int test_vec_all_ne_s128(void) { + // CHECK-LABEL: @test_vec_all_ne_s128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 0, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_all_ne(vsi128a, vsi128b); +} + +int test_vec_all_ne_u128(void) { + // CHECK-LABEL: @test_vec_all_ne_u128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 0, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_all_ne(vui128a, vui128b); +} + +int test_vec_all_lt_s128(void) { + // CHECK-LABEL: @test_vec_all_lt_s128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 2, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_all_lt(vsi128a, vsi128b); +} + +int test_vec_all_lt_u128(void) { + // CHECK-LABEL: @test_vec_all_lt_u128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 2, <1 x i128> %2, <1 x i128> %3) + // CHECK: ret i32 + return vec_all_lt(vui128a, vui128b); +} + +int test_vec_all_gt_s128(void) { + // CHECK-LABEL: @test_vec_all_gt_s128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 2, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_all_gt(vsi128a, vsi128b); +} + +int test_vec_all_gt_u128(void) { + // CHECK-LABEL: @test_vec_all_gt_u128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 2, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_all_gt(vui128a, vui128b); +} + +int 
test_vec_all_le_s128(void) { + // CHECK-LABEL: @test_vec_all_le_s128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 0, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_all_le(vsi128a, vsi128b); +} + +int test_vec_all_le_u128(void) { + // CHECK-LABEL: @test_vec_all_le_u128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 0, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_all_le(vui128a, vui128b); +} + +int test_vec_all_ge_s128(void) { + // CHECK-LABEL: @test_vec_all_ge_s128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 0, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_all_ge(vsi128a, vsi128b); +} + +int test_vec_all_ge_u128(void) { + // CHECK-LABEL: @test_vec_all_ge_u128( + // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 0, <1 x i128> %2, <1 x i128> %3) + // CHECK-NEXT: ret i32 + return vec_all_ge(vui128a, vui128b); +} Index: clang/lib/Headers/altivec.h =================================================================== --- clang/lib/Headers/altivec.h +++ clang/lib/Headers/altivec.h @@ -14140,6 +14140,18 @@ } #endif +#ifdef __POWER10_VECTOR__ +static __inline__ int __ATTRS_o_ai +vec_all_eq(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai +vec_all_eq(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b); +} +#endif + /* vec_all_ge */ static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a, @@ -14311,6 +14323,18 @@ } #endif +#ifdef __POWER10_VECTOR__ +static __inline__ int __ATTRS_o_ai +vec_all_ge(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai +vec_all_ge(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vcmpgtuq_p(__CR6_EQ, __b, __a); +} +#endif + /* 
vec_all_gt */ static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a, @@ -14482,6 +14506,18 @@ } #endif +#ifdef __POWER10_VECTOR__ +static __inline__ int __ATTRS_o_ai +vec_all_gt(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai +vec_all_gt(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vcmpgtuq_p(__CR6_LT, __a, __b); +} +#endif + /* vec_all_in */ static __inline__ int __attribute__((__always_inline__)) @@ -14661,6 +14697,18 @@ } #endif +#ifdef __POWER10_VECTOR__ +static __inline__ int __ATTRS_o_ai +vec_all_le(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai +vec_all_le(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vcmpgtuq_p(__CR6_EQ, __a, __b); +} +#endif + /* vec_all_lt */ static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a, @@ -14833,6 +14881,18 @@ } #endif +#ifdef __POWER10_VECTOR__ +static __inline__ int __ATTRS_o_ai +vec_all_lt(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai +vec_all_lt(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vcmpgtuq_p(__CR6_LT, __b, __a); +} +#endif + /* vec_all_nan */ static __inline__ int __ATTRS_o_ai vec_all_nan(vector float __a) { @@ -15037,6 +15097,18 @@ } #endif +#ifdef __POWER10_VECTOR__ +static __inline__ int __ATTRS_o_ai +vec_all_ne(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai +vec_all_ne(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b); +} +#endif + /* vec_all_nge */ 
static __inline__ int __ATTRS_o_ai vec_all_nge(vector float __a, @@ -15282,6 +15354,18 @@ } #endif +#ifdef __POWER10_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b); +} +#endif + /* vec_any_ge */ static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a, @@ -15461,6 +15545,18 @@ } #endif +#ifdef __POWER10_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + return __builtin_altivec_vcmpgtuq_p(__CR6_LT_REV, __b, __a); +} +#endif + /* vec_any_gt */ static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a, @@ -15640,6 +15736,18 @@ } #endif +#ifdef __POWER10_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + return __builtin_altivec_vcmpgtuq_p(__CR6_EQ_REV, __a, __b); +} +#endif + /* vec_any_le */ static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a, @@ -15819,6 +15927,18 @@ } #endif +#ifdef __POWER10_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_any_le(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + return __builtin_altivec_vcmpgtuq_p(__CR6_LT_REV, __a, 
__b); +} +#endif + /* vec_any_lt */ static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a, @@ -15998,6 +16118,18 @@ } #endif +#ifdef __POWER10_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + return __builtin_altivec_vcmpgtuq_p(__CR6_EQ_REV, __b, __a); +} +#endif + /* vec_any_nan */ static __inline__ int __attribute__((__always_inline__)) @@ -16193,6 +16325,18 @@ } #endif +#ifdef __POWER10_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed __int128 __a, + vector signed __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned __int128 __a, + vector unsigned __int128 __b) { + return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b); +} +#endif + /* vec_any_nge */ static __inline__ int __attribute__((__always_inline__)) Index: clang/lib/CodeGen/CGExprScalar.cpp =================================================================== --- clang/lib/CodeGen/CGExprScalar.cpp +++ clang/lib/CodeGen/CGExprScalar.cpp @@ -3882,6 +3882,12 @@ case BuiltinType::Double: return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p : llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p; + case BuiltinType::UInt128: + return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p : + llvm::Intrinsic::ppc_altivec_vcmpgtuq_p; + case BuiltinType::Int128: + return (IT == VCMPEQ) ? 
llvm::Intrinsic::ppc_altivec_vcmpequq_p : + llvm::Intrinsic::ppc_altivec_vcmpgtsq_p; } } Index: clang/include/clang/Basic/BuiltinsPPC.def =================================================================== --- clang/include/clang/Basic/BuiltinsPPC.def +++ clang/include/clang/Basic/BuiltinsPPC.def @@ -160,6 +160,9 @@ BUILTIN(__builtin_altivec_vcmpequq, "V1LLLiV1ULLLiV1ULLLi", "") BUILTIN(__builtin_altivec_vcmpgtsq, "V1LLLiV1SLLLiV1SLLLi", "") BUILTIN(__builtin_altivec_vcmpgtuq, "V1LLLiV1ULLLiV1ULLLi", "") +BUILTIN(__builtin_altivec_vcmpequq_p, "iiV1ULLLiV1LLLi", "") +BUILTIN(__builtin_altivec_vcmpgtsq_p, "iiV1SLLLiV1SLLLi", "") +BUILTIN(__builtin_altivec_vcmpgtuq_p, "iiV1ULLLiV1ULLLi", "") BUILTIN(__builtin_altivec_vmaxsb, "V16ScV16ScV16Sc", "") BUILTIN(__builtin_altivec_vmaxub, "V16UcV16UcV16Uc", "")
_______________________________________________ cfe-commits mailing list cfe-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits