Matthew Malcomson <matthew.malcom...@arm.com> writes: > --- a/gcc/config/aarch64/aarch64-protos.h > +++ b/gcc/config/aarch64/aarch64-protos.h > @@ -780,6 +780,7 @@ extern const atomic_ool_names aarch64_ool_ldeor_names; > > tree aarch64_resolve_overloaded_builtin_general (location_t, tree, void *); > > +const char * aarch64_sls_barrier (int);
Should be no space after the “*”. > extern bool aarch64_harden_sls_retbr_p (void); > extern bool aarch64_harden_sls_blr_p (void); > > diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h > index > 24767c747bab0d711627c5c646937c42f210d70b..5da3d94e335fc315e1d90e6a674f2f09cf1a4529 > 100644 > --- a/gcc/config/aarch64/aarch64.h > +++ b/gcc/config/aarch64/aarch64.h > @@ -281,6 +281,7 @@ extern unsigned aarch64_architecture_version; > #define AARCH64_ISA_F32MM (aarch64_isa_flags & AARCH64_FL_F32MM) > #define AARCH64_ISA_F64MM (aarch64_isa_flags & AARCH64_FL_F64MM) > #define AARCH64_ISA_BF16 (aarch64_isa_flags & AARCH64_FL_BF16) > +#define AARCH64_ISA_SB (aarch64_isa_flags & AARCH64_FL_SB) > > /* Crypto is an optional extension to AdvSIMD. */ > #define TARGET_CRYPTO (TARGET_SIMD && AARCH64_ISA_CRYPTO) > @@ -378,6 +379,9 @@ extern unsigned aarch64_architecture_version; > #define TARGET_FIX_ERR_A53_835769_DEFAULT 1 > #endif > > +/* SB instruction is enabled through +sb. */ > +#define TARGET_SB (AARCH64_ISA_SB) > + > /* Apply the workaround for Cortex-A53 erratum 835769. */ > #define TARGET_FIX_ERR_A53_835769 \ > ((aarch64_fix_a53_err835769 == 2) \ > @@ -1058,8 +1062,11 @@ typedef struct > > #define RETURN_ADDR_RTX aarch64_return_addr > > -/* BTI c + 3 insns + 2 pointer-sized entries. */ > -#define TRAMPOLINE_SIZE (TARGET_ILP32 ? 24 : 32) > +/* BTI c + 3 insns > + + sls barrier of DSB + ISB. > + + 2 pointer-sized entries. */ > +#define TRAMPOLINE_SIZE (24 \ > + + (TARGET_ILP32 ? 8 : 16)) Personal taste, sorry, but IMO this is easier to read on one line. > > /* Trampolines contain dwords, so must be dword aligned. 
*/ > #define TRAMPOLINE_ALIGNMENT 64 > diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c > index > 775f49991e5f599a843d3ef490b8cd044acfe78f..9356937fe266c68196392a1589b3cf96607de104 > 100644 > --- a/gcc/config/aarch64/aarch64.c > +++ b/gcc/config/aarch64/aarch64.c > @@ -10822,8 +10822,8 @@ aarch64_return_addr (int count, rtx frame > ATTRIBUTE_UNUSED) > static void > aarch64_asm_trampoline_template (FILE *f) > { > - int offset1 = 16; > - int offset2 = 20; > + int offset1 = 24; > + int offset2 = 28; Huh, the offset handling in this function is a bit twisty, but that's not your fault :-) > […] > @@ -11054,6 +11065,7 @@ aarch64_output_casesi (rtx *operands) > output_asm_insn (buf, operands); > output_asm_insn (patterns[index][1], operands); > output_asm_insn ("br\t%3", operands); > + output_asm_insn (aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()), > operands); Long line. > […] > diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md > index > ff15505d45546124868d2531b7f4e5b0f1f5bebc..75ef87a3b4674cc73cb42cc82cfb8e782acf77f6 > 100644 > --- a/gcc/config/aarch64/aarch64.md > +++ b/gcc/config/aarch64/aarch64.md > @@ -447,8 +447,15 @@ > (define_insn "indirect_jump" > [(set (pc) (match_operand:DI 0 "register_operand" "r"))] > "" > - "br\\t%0" > - [(set_attr "type" "branch")] > + { > + output_asm_insn ("br\\t%0", operands); > + return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()); > + } > + [(set_attr "type" "branch") > + (set (attr "length") > + (cond [(match_test "!aarch64_harden_sls_retbr_p ()") (const_int 4) > + (match_test "TARGET_SB") (const_int 8)] > + (const_int 12)))] Rather than duplicating this several times, I think it would be better to add a new attribute like “sls_mitigation”, set that attribute in the define_insns, and then use “sls_mitigation” in the default “length” calculation. See e.g. what rth did with “movprfx”. 
> […] > diff --git > a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c > b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c > new file mode 100644 > index > 0000000000000000000000000000000000000000..11f614b4ef2eb0fa3707cb46a55583d6685b89d0 > --- /dev/null > +++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c > @@ -0,0 +1,19 @@ > +/* { dg-do compile } */ > +/* { dg-additional-options "-mharden-sls=retbr -mbranch-protection=pac-ret > -march=armv8.3-a" } */ > + > +/* Testing the do_return pattern for retaa and retab. */ > +long retbr_subcall(void); > +long retbr_do_return_retaa(void) > +{ > + return retbr_subcall()+1; > +} > +__attribute__((target("branch-protection=pac-ret+b-key"))) > +long retbr_do_return_retab(void) > +{ > + return retbr_subcall()+1; > +} > + > +/* Ensure there are no BR or RET instructions which are not directly followed > + by a speculation barrier. */ > +/* { dg-final { scan-assembler-not > "\t(br|ret|retaa|retab)\tx\[0-9\]\[0-9\]?\n\t(?!dsb\tsy\n\tisb|sb)" } } */ Isn't the “sb” alternative invalid given the -march option? Probably slightly easier to read if the regexp is quoted using {…} rather than "…". Same for the other tests. > […] > +/* { dg-final { scan-assembler-not "ret\t" } } */ > diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c > b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c > new file mode 100644 > index > 0000000000000000000000000000000000000000..5cd4da6bbb719a5135faa2c9818dc873e3d5af70 > --- /dev/null > +++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c > […] > +/* Testing the indirect_jump pattern. */ > +typedef signed __attribute__((mode(DI))) intptr_t; Just to check, have you tested this with -mabi=ilp32? Looks like it'll probably be OK, was just suspicious because this isn't “intptr_t” there. 
> diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp > new file mode 100644 > index > 0000000000000000000000000000000000000000..fb63c6dfe230e64b11919381c30a3a05eee52e16 > --- /dev/null > +++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp > @@ -0,0 +1,73 @@ > +# Regression driver for SLS mitigation on AArch64. > +# Copyright (C) 2020-2020 Free Software Foundation, Inc. Just 2020. Thanks, Richard