Author: Joseph Huber
Date: 2024-07-10T12:56:54-05:00
New Revision: 196ee230fdc9ab90dacfeb846c794f5d0c9d1e0c
URL: https://github.com/llvm/llvm-project/commit/196ee230fdc9ab90dacfeb846c794f5d0c9d1e0c
DIFF: https://github.com/llvm/llvm-project/commit/196ee230fdc9ab90dacfeb846c794f5d0c9d1e0c.diff

LOG: [Clang] Correctly enable the f16 type for offloading (#98331)

Summary:
There's an extra target hook that's required to *actually* enable use of
the native f16 type. The fp16 and f16 modes are not the same: fp16 is a
legacy lowering that round-trips half-precision values through float
conversions instead of operating on the native type. Without this
override, the math builtins are simply broken.

Added: 
    clang/test/CodeGen/builtins-nvptx-native-half-type-native.c

Modified: 
    clang/lib/Basic/Targets/NVPTX.h

Removed: 
    clang/test/CodeGen/builtins-nvptx-native-half-type-err.c


################################################################################
diff --git a/clang/lib/Basic/Targets/NVPTX.h b/clang/lib/Basic/Targets/NVPTX.h
index 9a985e46e22da..be43bb04fa2ed 100644
--- a/clang/lib/Basic/Targets/NVPTX.h
+++ b/clang/lib/Basic/Targets/NVPTX.h
@@ -75,6 +75,8 @@ class LLVM_LIBRARY_VISIBILITY NVPTXTargetInfo : public TargetInfo {
 
   ArrayRef<Builtin::Info> getTargetBuiltins() const override;
 
+  bool useFP16ConversionIntrinsics() const override { return false; }
+
   bool initFeatureMap(llvm::StringMap<bool> &Features,
                       DiagnosticsEngine &Diags, StringRef CPU,

diff --git a/clang/test/CodeGen/builtins-nvptx-native-half-type-err.c b/clang/test/CodeGen/builtins-nvptx-native-half-type-err.c
deleted file mode 100644
index 3b9413ddd4a4b..0000000000000
--- a/clang/test/CodeGen/builtins-nvptx-native-half-type-err.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// REQUIRES: nvptx-registered-target
-//
-// RUN: not %clang_cc1 -ffp-contract=off -triple nvptx-unknown-unknown -target-cpu \
-// RUN:   sm_86 -target-feature +ptx72 -fcuda-is-device -x cuda -emit-llvm -o - %s 2>&1 \
-// RUN:   | FileCheck -check-prefix=CHECK_ERROR %s
-
-#define __device__ __attribute__((device))
-typedef __fp16 __fp16v2 __attribute__((ext_vector_type(2)));
-
-__device__ void nvvm_native_half_types(void *a, void*b, void*c, __fp16* out) {
-  __fp16v2 resv2 = {0, 0};
-  *out += __nvvm_ex2_approx_f16(*(__fp16 *)a);
-  resv2 = __nvvm_ex2_approx_f16x2(*(__fp16v2*)a);
-
-  *out += __nvvm_fma_rn_relu_f16(*(__fp16*)a, *(__fp16*)b, *(__fp16*)c);
-  *out += __nvvm_fma_rn_ftz_relu_f16(*(__fp16*)a, *(__fp16*)b, *(__fp16 *)c);
-  resv2 += __nvvm_fma_rn_relu_f16x2(*(__fp16v2*)a, *(__fp16v2*)b, *(__fp16v2*)c);
-  resv2 += __nvvm_fma_rn_ftz_relu_f16x2(*(__fp16v2*)a, *(__fp16v2*)b, *(__fp16v2*)c);
-  *out += __nvvm_fma_rn_ftz_f16(*(__fp16*)a, *(__fp16*)b, *(__fp16*)c);
-  *out += __nvvm_fma_rn_sat_f16(*(__fp16*)a, *(__fp16*)b, *(__fp16*)c);
-  *out += __nvvm_fma_rn_ftz_sat_f16(*(__fp16*)a, *(__fp16*)b, *(__fp16*)c);
-  resv2 += __nvvm_fma_rn_f16x2(*(__fp16v2*)a, *(__fp16v2*)b, *(__fp16v2*)c);
-  resv2 += __nvvm_fma_rn_ftz_f16x2(*(__fp16v2*)a, *(__fp16v2*)b, *(__fp16v2*)c);
-  resv2 += __nvvm_fma_rn_sat_f16x2(*(__fp16v2*)a, *(__fp16v2*)b, *(__fp16v2*)c);
-  resv2 += __nvvm_fma_rn_ftz_sat_f16x2(*(__fp16v2*)a, *(__fp16v2*)b, *(__fp16v2*)c);
-
-  *out += __nvvm_fmin_f16(*(__fp16*)a, *(__fp16*)b);
-  *out += __nvvm_fmin_ftz_f16(*(__fp16*)a, *(__fp16*)b);
-  *out += __nvvm_fmin_nan_f16(*(__fp16*)a, *(__fp16*)b);
-  *out += __nvvm_fmin_ftz_nan_f16(*(__fp16*)a, *(__fp16*)b);
-  resv2 += __nvvm_fmin_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
-  resv2 += __nvvm_fmin_ftz_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
-  resv2 += __nvvm_fmin_nan_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
-  resv2 += __nvvm_fmin_ftz_nan_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
-  *out += __nvvm_fmin_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
-  *out += __nvvm_fmin_ftz_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
-  *out += __nvvm_fmin_nan_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
-  *out += __nvvm_fmin_ftz_nan_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
-  resv2 += __nvvm_fmin_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
-  resv2 += __nvvm_fmin_ftz_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
-  resv2 += __nvvm_fmin_nan_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
-  resv2 += __nvvm_fmin_ftz_nan_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
-
-  *out += __nvvm_fmax_f16(*(__fp16*)a, *(__fp16*)b);
-  *out += __nvvm_fmax_ftz_f16(*(__fp16*)a, *(__fp16*)b);
-  *out += __nvvm_fmax_nan_f16(*(__fp16*)a, *(__fp16*)b);
-  *out += __nvvm_fmax_ftz_nan_f16(*(__fp16*)a, *(__fp16*)b);
-  resv2 += __nvvm_fmax_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
-  resv2 += __nvvm_fmax_ftz_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
-  resv2 += __nvvm_fmax_nan_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
-  resv2 += __nvvm_fmax_ftz_nan_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
-  *out += __nvvm_fmax_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
-  *out += __nvvm_fmax_ftz_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
-  *out += __nvvm_fmax_nan_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
-  *out += __nvvm_fmax_ftz_nan_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
-  resv2 += __nvvm_fmax_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
-  resv2 += __nvvm_fmax_ftz_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
-  resv2 += __nvvm_fmax_nan_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
-  resv2 += __nvvm_fmax_ftz_nan_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
-
-  *out += __nvvm_ldg_h((__fp16 *)a);
-  resv2 += __nvvm_ldg_h2((__fp16v2 *)a);
-
-  *out += __nvvm_ldu_h((__fp16 *)a);
-  resv2 += __nvvm_ldu_h2((__fp16v2 *)a);
-
-  *out += resv2[0] + resv2[1];
-}
-
-// CHECK_ERROR: error: __nvvm_ex2_approx_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_ex2_approx_f16x2 requires native half type support.
-
-// CHECK_ERROR: error: __nvvm_fma_rn_relu_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fma_rn_ftz_relu_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fma_rn_relu_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fma_rn_ftz_relu_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fma_rn_ftz_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fma_rn_sat_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fma_rn_ftz_sat_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fma_rn_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fma_rn_ftz_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fma_rn_sat_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fma_rn_ftz_sat_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_ftz_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_nan_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_ftz_nan_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_ftz_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_nan_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_ftz_nan_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_xorsign_abs_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_ftz_xorsign_abs_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_nan_xorsign_abs_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_ftz_nan_xorsign_abs_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_xorsign_abs_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_ftz_xorsign_abs_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_nan_xorsign_abs_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmin_ftz_nan_xorsign_abs_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_ftz_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_nan_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_ftz_nan_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_ftz_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_nan_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_ftz_nan_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_xorsign_abs_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_ftz_xorsign_abs_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_nan_xorsign_abs_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_ftz_nan_xorsign_abs_f16 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_xorsign_abs_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_ftz_xorsign_abs_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_nan_xorsign_abs_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_fmax_ftz_nan_xorsign_abs_f16x2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_ldg_h requires native half type support.
-// CHECK_ERROR: error: __nvvm_ldg_h2 requires native half type support.
-// CHECK_ERROR: error: __nvvm_ldu_h requires native half type support.
-// CHECK_ERROR: error: __nvvm_ldu_h2 requires native half type support.
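The removed test pinned down the old failure mode: every builtin above was
rejected because Clang did not treat __fp16 as a native type on NVPTX. With
useFP16ConversionIntrinsics() returning false, Clang should map __fp16 onto
LLVM's native half type instead of lowering it through the fp16 conversion
intrinsics, so the same calls now emit the llvm.nvvm.* intrinsics directly.
A minimal sketch of the new behavior (an illustrative fragment, not part of
the commit; the function name is invented):

    #define __device__ __attribute__((device))

    // Compiled like the tests here (-triple nvptx-unknown-unknown
    // -target-cpu sm_86 -target-feature +ptx72 -fcuda-is-device), this
    // should now emit roughly:
    //   %r = call half @llvm.nvvm.ex2.approx.f16(half %x)
    // rather than the old "requires native half type support" diagnostic.
    __device__ __fp16 exp2_approx(__fp16 x) {
      return __nvvm_ex2_approx_f16(x);
    }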
diff --git a/clang/test/CodeGen/builtins-nvptx-native-half-type-native.c b/clang/test/CodeGen/builtins-nvptx-native-half-type-native.c
new file mode 100644
index 0000000000000..b594fc876d4b9
--- /dev/null
+++ b/clang/test/CodeGen/builtins-nvptx-native-half-type-native.c
@@ -0,0 +1,117 @@
+// REQUIRES: nvptx-registered-target
+//
+// RUN: %clang_cc1 -ffp-contract=off -triple nvptx-unknown-unknown -target-cpu \
+// RUN:   sm_86 -target-feature +ptx72 -fcuda-is-device -x cuda -emit-llvm -o - %s \
+// RUN:   | FileCheck %s
+
+#define __device__ __attribute__((device))
+typedef __fp16 __fp16v2 __attribute__((ext_vector_type(2)));
+
+// CHECK: call half @llvm.nvvm.ex2.approx.f16(half {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.ex2.approx.f16x2(<2 x half> {{.*}})
+// CHECK: call half @llvm.nvvm.fma.rn.relu.f16(half {{.*}}, half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fma.rn.ftz.relu.f16(half {{.*}}, half {{.*}}, half {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fma.rn.relu.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fma.rn.ftz.relu.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call half @llvm.nvvm.fma.rn.ftz.f16(half {{.*}}, half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fma.rn.sat.f16(half {{.*}}, half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fma.rn.ftz.sat.f16(half {{.*}}, half {{.*}}, half {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fma.rn.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fma.rn.ftz.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fma.rn.sat.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fma.rn.ftz.sat.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call half @llvm.nvvm.fmin.f16(half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fmin.ftz.f16(half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fmin.nan.f16(half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fmin.ftz.nan.f16(half {{.*}}, half {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmin.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmin.ftz.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmin.nan.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmin.ftz.nan.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call half @llvm.nvvm.fmin.xorsign.abs.f16(half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fmin.ftz.xorsign.abs.f16(half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fmin.nan.xorsign.abs.f16(half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16(half {{.*}}, half {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmin.xorsign.abs.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmin.ftz.xorsign.abs.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmin.nan.xorsign.abs.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call half @llvm.nvvm.fmax.f16(half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fmax.ftz.f16(half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fmax.nan.f16(half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fmax.ftz.nan.f16(half {{.*}}, half {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmax.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmax.ftz.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmax.nan.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmax.ftz.nan.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call half @llvm.nvvm.fmax.xorsign.abs.f16(half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fmax.ftz.xorsign.abs.f16(half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fmax.nan.xorsign.abs.f16(half {{.*}}, half {{.*}})
+// CHECK: call half @llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16(half {{.*}}, half {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmax.xorsign.abs.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmax.ftz.xorsign.abs.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmax.nan.xorsign.abs.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call <2 x half> @llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16x2(<2 x half> {{.*}}, <2 x half> {{.*}})
+// CHECK: call half @llvm.nvvm.ldg.global.f.f16.p0(ptr {{.*}}, i32 2)
+// CHECK: call <2 x half> @llvm.nvvm.ldg.global.f.v2f16.p0(ptr {{.*}}, i32 4)
+// CHECK: call half @llvm.nvvm.ldu.global.f.f16.p0(ptr {{.*}}, i32 2)
+// CHECK: call <2 x half> @llvm.nvvm.ldu.global.f.v2f16.p0(ptr {{.*}}, i32 4)
+__device__ void nvvm_native_half_types(void *a, void*b, void*c, __fp16* out) {
+  __fp16v2 resv2 = {0, 0};
+  *out += __nvvm_ex2_approx_f16(*(__fp16 *)a);
+  resv2 = __nvvm_ex2_approx_f16x2(*(__fp16v2*)a);
+
+  *out += __nvvm_fma_rn_relu_f16(*(__fp16*)a, *(__fp16*)b, *(__fp16*)c);
+  *out += __nvvm_fma_rn_ftz_relu_f16(*(__fp16*)a, *(__fp16*)b, *(__fp16 *)c);
+  resv2 += __nvvm_fma_rn_relu_f16x2(*(__fp16v2*)a, *(__fp16v2*)b, *(__fp16v2*)c);
+  resv2 += __nvvm_fma_rn_ftz_relu_f16x2(*(__fp16v2*)a, *(__fp16v2*)b, *(__fp16v2*)c);
+  *out += __nvvm_fma_rn_ftz_f16(*(__fp16*)a, *(__fp16*)b, *(__fp16*)c);
+  *out += __nvvm_fma_rn_sat_f16(*(__fp16*)a, *(__fp16*)b, *(__fp16*)c);
+  *out += __nvvm_fma_rn_ftz_sat_f16(*(__fp16*)a, *(__fp16*)b, *(__fp16*)c);
+  resv2 += __nvvm_fma_rn_f16x2(*(__fp16v2*)a, *(__fp16v2*)b, *(__fp16v2*)c);
+  resv2 += __nvvm_fma_rn_ftz_f16x2(*(__fp16v2*)a, *(__fp16v2*)b, *(__fp16v2*)c);
+  resv2 += __nvvm_fma_rn_sat_f16x2(*(__fp16v2*)a, *(__fp16v2*)b, *(__fp16v2*)c);
+  resv2 += __nvvm_fma_rn_ftz_sat_f16x2(*(__fp16v2*)a, *(__fp16v2*)b, *(__fp16v2*)c);
+
+  *out += __nvvm_fmin_f16(*(__fp16*)a, *(__fp16*)b);
+  *out += __nvvm_fmin_ftz_f16(*(__fp16*)a, *(__fp16*)b);
+  *out += __nvvm_fmin_nan_f16(*(__fp16*)a, *(__fp16*)b);
+  *out += __nvvm_fmin_ftz_nan_f16(*(__fp16*)a, *(__fp16*)b);
+  resv2 += __nvvm_fmin_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
+  resv2 += __nvvm_fmin_ftz_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
+  resv2 += __nvvm_fmin_nan_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
+  resv2 += __nvvm_fmin_ftz_nan_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
+  *out += __nvvm_fmin_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
+  *out += __nvvm_fmin_ftz_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
+  *out += __nvvm_fmin_nan_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
+  *out += __nvvm_fmin_ftz_nan_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
+  resv2 += __nvvm_fmin_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
+  resv2 += __nvvm_fmin_ftz_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
+  resv2 += __nvvm_fmin_nan_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
+  resv2 += __nvvm_fmin_ftz_nan_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
+
+  *out += __nvvm_fmax_f16(*(__fp16*)a, *(__fp16*)b);
+  *out += __nvvm_fmax_ftz_f16(*(__fp16*)a, *(__fp16*)b);
+  *out += __nvvm_fmax_nan_f16(*(__fp16*)a, *(__fp16*)b);
+  *out += __nvvm_fmax_ftz_nan_f16(*(__fp16*)a, *(__fp16*)b);
+  resv2 += __nvvm_fmax_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
+  resv2 += __nvvm_fmax_ftz_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
+  resv2 += __nvvm_fmax_nan_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
+  resv2 += __nvvm_fmax_ftz_nan_f16x2(*(__fp16v2*)a , *(__fp16v2*)b);
+  *out += __nvvm_fmax_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
+  *out += __nvvm_fmax_ftz_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
+  *out += __nvvm_fmax_nan_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
+  *out += __nvvm_fmax_ftz_nan_xorsign_abs_f16(*(__fp16*)a, *(__fp16*)b);
+  resv2 += __nvvm_fmax_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
+  resv2 += __nvvm_fmax_ftz_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
+  resv2 += __nvvm_fmax_nan_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
+  resv2 += __nvvm_fmax_ftz_nan_xorsign_abs_f16x2(*(__fp16v2*)a, *(__fp16v2*)b);
+
+  *out += __nvvm_ldg_h((__fp16 *)a);
+  resv2 += __nvvm_ldg_h2((__fp16v2 *)a);
+
+  *out += __nvvm_ldu_h((__fp16 *)a);
+  resv2 += __nvvm_ldu_h2((__fp16v2 *)a);
+
+  *out += resv2[0] + resv2[1];
+}
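The ext_vector_type typedef used throughout these tests corresponds to
LLVM's <2 x half>, so each _f16x2 builtin handles both lanes with a single
intrinsic call, as the <2 x half> CHECK lines above verify. A hypothetical
usage fragment (not from the commit, compiled with the same cc1 line as the
new test) along the same lines:

    #define __device__ __attribute__((device))
    typedef __fp16 __fp16v2 __attribute__((ext_vector_type(2)));

    // Each call below should lower to one <2 x half> NVVM intrinsic, e.g.
    //   call <2 x half> @llvm.nvvm.fmin.f16x2(<2 x half> %v, <2 x half> %w)
    __device__ __fp16 reduce_minmax(__fp16v2 v, __fp16v2 w) {
      __fp16v2 lo = __nvvm_fmin_f16x2(v, w);
      __fp16v2 hi = __nvvm_fmax_f16x2(v, w);
      return lo[0] + lo[1] + hi[0] + hi[1];
    }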