Hi all,

This patch is part of a series adding support for Armv8.6-A features.
It depends on the Arm BFmode patch:
https://gcc.gnu.org/ml/gcc-patches/2019-12/msg01448.html
This patch implements intrinsics to convert between bfloat16 and float32
formats.

The ACLE documents are at https://developer.arm.com/docs/101028/latest
The ISA documents are at https://developer.arm.com/docs/ddi0596/latest

Regression tested.

Is it OK for trunk please?

Thanks,
Dennis

gcc/ChangeLog:

2020-01-17  Dennis Zhang  <dennis.zh...@arm.com>

	* config/arm/arm_bf16.h (vcvtah_f32_bf16, vcvth_bf16_f32): New.
	* config/arm/arm_neon.h (vcvt_f32_bf16, vcvtq_low_f32_bf16): New.
	(vcvtq_high_f32_bf16, vcvt_bf16_f32): New.
	(vcvtq_low_bf16_f32, vcvtq_high_bf16_f32): New.
	* config/arm/arm_neon_builtins.def (vbfcvt, vbfcvt_high): New entries.
	(vbfcvtv4sf, vbfcvtv4sf_high): Likewise.
	* config/arm/iterators.md (VBFCVT, VBFCVTM): New mode iterators.
	(V_bf_low, V_bf_cvt_m): New mode attributes.
	* config/arm/neon.md (neon_vbfcvtv4sf<VBFCVT:mode>): New.
	(neon_vbfcvtv4sf_highv8bf, neon_vbfcvtsf): New.
	(neon_vbfcvt<VBFCVT:mode>, neon_vbfcvt_highv8bf): New.
	(neon_vbfcvtbf_cvtmode<mode>, neon_vbfcvtbf): New.
	* config/arm/unspecs.md (UNSPEC_BFCVT, UNSPEC_BFCVT_HIGH): New.

gcc/testsuite/ChangeLog:

2020-01-17  Dennis Zhang  <dennis.zh...@arm.com>

	* gcc.target/arm/simd/bf16_cvt_1.c: New test.
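For reviewers, here is a small usage sketch of the new intrinsics.  It is
illustrative only and not part of the patch: the function names below are
made up, and it assumes arm_neon.h pulls in arm_bf16.h (from the BFmode
patch) and a toolchain accepting -march=armv8.2-a+bf16, matching the target
pragma used in arm_neon.h.

  #include <arm_neon.h>

  /* Round-trip four floats through bfloat16 with the new vector intrinsics:
     vcvt_bf16_f32 narrows (vcvt.bf16.f32), vcvt_f32_bf16 widens back.  */
  float32x4_t
  roundtrip_f32x4 (float32x4_t x)
  {
    bfloat16x4_t b = vcvt_bf16_f32 (x);
    return vcvt_f32_bf16 (b);
  }

  /* Scalar equivalents from arm_bf16.h: vcvth_bf16_f32 narrows
     (vcvtb.bf16.f32), vcvtah_f32_bf16 widens back.  */
  float32_t
  roundtrip_f32 (float32_t x)
  {
    bfloat16_t b = vcvth_bf16_f32 (x);
    return vcvtah_f32_bf16 (b);
  }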
diff --git a/gcc/config/arm/arm_bf16.h b/gcc/config/arm/arm_bf16.h
index decf23f38346c033f9d7502ce82e11ce81b9bc3a..1aa593192c091850e3ffbe4433d18c0ff543173a 100644
--- a/gcc/config/arm/arm_bf16.h
+++ b/gcc/config/arm/arm_bf16.h
@@ -34,6 +34,20 @@ extern "C" {
 typedef __bf16 bfloat16_t;
 typedef float float32_t;
 
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtah_f32_bf16 (bfloat16_t __a)
+{
+  return __builtin_neon_vbfcvtbf (__a);
+}
+
+__extension__ extern __inline bfloat16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvth_bf16_f32 (float32_t __a)
+{
+  return __builtin_neon_vbfcvtsf (__a);
+}
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/gcc/config/arm/arm_neon.h b/gcc/config/arm/arm_neon.h
index 3c78f435009ab027f92693d00ab5b40960d5419d..60ac68702c4f1ef0408c2d0663ebd89bfc6610a2 100644
--- a/gcc/config/arm/arm_neon.h
+++ b/gcc/config/arm/arm_neon.h
@@ -18745,6 +18745,55 @@ vcmlaq_rot270_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
 #pragma GCC pop_options
 #endif
 
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.2-a+bf16")
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_f32_bf16 (bfloat16x4_t __a)
+{
+  return __builtin_neon_vbfcvtv4bf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_low_f32_bf16 (bfloat16x8_t __a)
+{
+  return __builtin_neon_vbfcvtv8bf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_high_f32_bf16 (bfloat16x8_t __a)
+{
+  return __builtin_neon_vbfcvt_highv8bf (__a);
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_bf16_f32 (float32x4_t __a)
+{
+  return __builtin_neon_vbfcvtv4sfv4bf (__a);
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_low_bf16_f32 (float32x4_t __a)
+{
+  return __builtin_neon_vbfcvtv4sfv8bf (__a);
+}
+
+/* The 'inactive' operand is not converted but it provides the
+   low 64 bits to assemble the final 128-bit result.  */
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_high_bf16_f32 (bfloat16x8_t inactive, float32x4_t __a)
+{
+  return __builtin_neon_vbfcvtv4sf_highv8bf (inactive, __a);
+}
+
+#pragma GCC pop_options
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/gcc/config/arm/arm_neon_builtins.def b/gcc/config/arm/arm_neon_builtins.def
index e9ff4e501cbb5d16b9211f5bc96db376ddf21afc..bc750895f994bff6799232ef2e63e27c9349e27d 100644
--- a/gcc/config/arm/arm_neon_builtins.def
+++ b/gcc/config/arm/arm_neon_builtins.def
@@ -373,3 +373,9 @@ VAR2 (MAC_LANE_PAIR, vcmlaq_lane0, v4sf, v8hf)
 VAR2 (MAC_LANE_PAIR, vcmlaq_lane90, v4sf, v8hf)
 VAR2 (MAC_LANE_PAIR, vcmlaq_lane180, v4sf, v8hf)
 VAR2 (MAC_LANE_PAIR, vcmlaq_lane270, v4sf, v8hf)
+
+VAR2 (UNOP, vbfcvt, sf, bf)
+VAR2 (UNOP, vbfcvt, v4bf, v8bf)
+VAR1 (UNOP, vbfcvt_high, v8bf)
+VAR2 (UNOP, vbfcvtv4sf, v4bf, v8bf)
+VAR1 (BINOP, vbfcvtv4sf_high, v8bf)
\ No newline at end of file
diff --git a/gcc/config/arm/iterators.md b/gcc/config/arm/iterators.md
index 33e29509f00a89fa23d0546687c0e4643f0b32d2..003de33bcddcec1c0d9682f775acdedf69c09ea8 100644
--- a/gcc/config/arm/iterators.md
+++ b/gcc/config/arm/iterators.md
@@ -229,6 +229,10 @@
 ;; Modes for polynomial or float values.
 (define_mode_iterator VPF [V8QI V16QI V2SF V4SF])
 
+;; Modes for BF16 convert instructions.
+(define_mode_iterator VBFCVT [V4BF V8BF])
+(define_mode_iterator VBFCVTM [V2SI SF])
+
 ;;----------------------------------------------------------------------------
 ;; Code iterators
 ;;----------------------------------------------------------------------------
@@ -732,6 +736,12 @@
                         (V2SF "") (V4SF "")
                         (DI "_neon") (V2DI "")])
 
+;; To select the low 64 bits of a vector.
+(define_mode_attr V_bf_low [(V4BF "P") (V8BF "e")])
+
+;; To generate intermediate modes for BF16 scalar convert.
+(define_mode_attr V_bf_cvt_m [(V2SI "BF") (SF "V2SI")])
+
 ;; Scalars to be presented to scalar multiplication instructions
 ;; must satisfy the following constraints.
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index 6087ca6f2badde6a492bb515a2cb5846f3d4ad8e..ba13d1659d6736e6146e16dfd3e3072a04a1de60 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -6552,3 +6552,80 @@ if (BYTES_BIG_ENDIAN)
   "vabd.<V_if_elem> %<V_reg>0, %<V_reg>1, %<V_reg>2"
   [(set_attr "type" "neon_fp_abd_s<q>")]
 )
+
+(define_insn "neon_vbfcvtv4sf<VBFCVT:mode>"
+  [(set (match_operand:VBFCVT 0 "register_operand" "=w")
+        (unspec:VBFCVT [(match_operand:V4SF 1 "register_operand" "w")]
+                       UNSPEC_BFCVT))]
+  "TARGET_BF16_SIMD"
+  "vcvt.bf16.f32\\t%<V_bf_low>0, %q1"
+  [(set_attr "type" "neon_fp_cvt_narrow_s_q")]
+)
+
+(define_insn "neon_vbfcvtv4sf_highv8bf"
+  [(set (match_operand:V8BF 0 "register_operand" "=w")
+        (unspec:V8BF [(match_operand:V8BF 1 "register_operand" "0")
+                      (match_operand:V4SF 2 "register_operand" "w")]
+                     UNSPEC_BFCVT_HIGH))]
+  "TARGET_BF16_SIMD"
+  "vcvt.bf16.f32\\t%f0, %q2"
+  [(set_attr "type" "neon_fp_cvt_narrow_s_q")]
+)
+
+(define_insn "neon_vbfcvtsf"
+  [(set (match_operand:BF 0 "register_operand" "=t")
+        (unspec:BF [(match_operand:SF 1 "register_operand" "t")]
+                   UNSPEC_BFCVT))]
+  "TARGET_BF16_FP"
+  "vcvtb.bf16.f32\\t%0, %1"
+  [(set_attr "type" "f_cvt")]
+)
+
+(define_insn "neon_vbfcvt<VBFCVT:mode>"
+  [(set (match_operand:V4SF 0 "register_operand" "=w")
+        (unspec:V4SF [(match_operand:VBFCVT 1 "register_operand" "w")]
+                     UNSPEC_BFCVT))]
+  "TARGET_BF16_SIMD"
+  "vshll.u32\\t%q0, %<V_bf_low>1, #16"
+  [(set_attr "type" "neon_shift_imm_q")]
+)
+
+(define_insn "neon_vbfcvt_highv8bf"
+  [(set (match_operand:V4SF 0 "register_operand" "=w")
+        (unspec:V4SF [(match_operand:V8BF 1 "register_operand" "w")]
+                     UNSPEC_BFCVT_HIGH))]
+  "TARGET_BF16_SIMD"
+  "vshll.u32\\t%q0, %f1, #16"
+  [(set_attr "type" "neon_shift_imm_q")]
+)
+
+;; Convert a BF scalar operand to SF via VSHL.
+;; VSHL does not accept the 32-bit registers where the BF and SF scalar
+;; operands would be allocated, so the operands must be converted to
+;; intermediate vectors (i.e. V2SI) in order to use 64-bit registers.
+(define_expand "neon_vbfcvtbf"
+  [(match_operand:SF 0 "register_operand")
+   (unspec:SF [(match_operand:BF 1 "register_operand")] UNSPEC_BFCVT)]
+  "TARGET_BF16_FP"
+{
+  rtx op0 = gen_reg_rtx (V2SImode);
+  rtx op1 = gen_reg_rtx (V2SImode);
+  emit_insn (gen_neon_vbfcvtbf_cvtmodev2si (op1, operands[1]));
+  emit_insn (gen_neon_vshl_nv2si (op0, op1, gen_int_mode (16, SImode)));
+  emit_insn (gen_neon_vbfcvtbf_cvtmodesf (operands[0], op0));
+  DONE;
+})
+
+;; Convert BF mode to V2SI and V2SI to SF.
+;; Implement this by allocating a 32-bit operand in the low half of a 64-bit
+;; register indexed by a 32-bit sub-register number.
+;; This will generate reloads, but the compiler can optimize out the moves.
+;; Use the 'x' constraint to keep the 32-bit sub-registers in an indexable
+;; range and so avoid extra moves.
+(define_insn "neon_vbfcvtbf_cvtmode<mode>"
+  [(set (match_operand:VBFCVTM 0 "register_operand" "=x")
+        (unspec:VBFCVTM [(match_operand:<V_bf_cvt_m> 1 "register_operand" "0")]
+                        UNSPEC_BFCVT))]
+  "TARGET_BF16_FP"
+  ""
+)
\ No newline at end of file
diff --git a/gcc/config/arm/unspecs.md b/gcc/config/arm/unspecs.md
index ade6b1af994863a0c2e1fbec0536485a4bbbe119..7bbaf904048a0a57bc96ffe6a985421b4c1f8ab6 100644
--- a/gcc/config/arm/unspecs.md
+++ b/gcc/config/arm/unspecs.md
@@ -493,4 +493,6 @@
   UNSPEC_VCMLA90
   UNSPEC_VCMLA180
   UNSPEC_VCMLA270
+  UNSPEC_BFCVT
+  UNSPEC_BFCVT_HIGH
 ])
diff --git a/gcc/testsuite/gcc.target/arm/simd/bf16_cvt_1.c b/gcc/testsuite/gcc.target/arm/simd/bf16_cvt_1.c
new file mode 100644
index 0000000000000000000000000000000000000000..c8776ddefd51cae04f05d8bb6f6f061d206e3211
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/bf16_cvt_1.c
@@ -0,0 +1,45 @@
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_2a_bf16_neon_ok } */
+/* { dg-options "-save-temps -O2" } */
+/* { dg-add-options arm_v8_2a_bf16_neon } */
+
+#include "arm_neon.h"
+
+extern void abort();
+
+#define TEST0(t, f, a, r) {	\
+  t f##_out = f (a);		\
+  if (f##_out != r)		\
+    abort();			\
+}
+
+#define TEST1(t, f, a, r, n) {	\
+  t f##_out = f (a);		\
+  for (int i = 0; i < n; i++)	\
+    if (f##_out[i] != r[i])	\
+      abort();			\
+}
+
+int
+main()
+{
+  float32_t f = 1;
+  float32x4_t f4 = {1, 1, 1, 1};
+
+  bfloat16_t b = vcvth_bf16_f32(f);
+  bfloat16x4_t b4 = vcvt_bf16_f32(f4);
+  bfloat16x8_t l8 = vcvtq_low_bf16_f32(f4);
+  bfloat16x8_t b8 = vcvtq_high_bf16_f32(l8, f4);
+
+  TEST0(float32_t, vcvtah_f32_bf16, b, f);
+  TEST1(float32x4_t, vcvt_f32_bf16, b4, f4, 4);
+  TEST1(float32x4_t, vcvtq_low_f32_bf16, b8, f4, 4);
+  TEST1(float32x4_t, vcvtq_high_f32_bf16, b8, f4, 4);
+
+  return 0;
+}
+
+/* { dg-final { scan-assembler-times {vcvtb.bf16.f32\ts[0-9]+, s[0-9]+\n} 1 } } */
+/* { dg-final { scan-assembler-times {vcvt.bf16.f32\td[0-9]+, q[0-9]+\n} 3 } } */
+/* { dg-final { scan-assembler-times {vshl.i32\td[0-9]+, d[0-9]+, #16} 1 } } */
+/* { dg-final { scan-assembler-times {vshll.u32\tq[0-9]+, d[0-9]+, #16} 3 } } */
\ No newline at end of file