I've backported the following SVE ACLE and stack-protector patches to GCC 10. The arm patch (5/5 in the series) was approved last week.

Tested on aarch64-linux-gnu and arm-linux-gnueabihf.

Richard
>From 0559badf0176b257d3cba89f8eb4b08948216002 Mon Sep 17 00:00:00 2001 From: Richard Sandiford <richard.sandif...@arm.com> Date: Tue, 29 Sep 2020 11:22:03 +0100 Subject: [PATCH 1/5] aarch64: Update the mangling of single SVE vectors and predicates GCC was implementing an old mangling scheme for single SVE vectors and predicates (based on the Advanced SIMD one). The final definition instead put them in the vendor built-in namespace via the "u" prefix. gcc/ * config/aarch64/aarch64-sve-builtins.cc (DEF_SVE_TYPE): Add a leading "u" to each mangled name. gcc/testsuite/ * g++.target/aarch64/sve/acle/general-c++/mangle_1.C: Add a leading "u" to the mangling of each SVE vector and predicate type. * g++.target/aarch64/sve/acle/general-c++/mangle_2.C: Likewise. * g++.target/aarch64/sve/acle/general-c++/mangle_3.C: Likewise. * g++.target/aarch64/sve/acle/general-c++/mangle_5.C: Likewise. (cherry picked from commit dcb043351307001a85fc1e7d56669f5adc9628f7) --- gcc/config/aarch64/aarch64-sve-builtins.cc | 2 +- .../aarch64/sve/acle/general-c++/mangle_1.C | 26 +++++++++---------- .../aarch64/sve/acle/general-c++/mangle_2.C | 26 +++++++++---------- .../aarch64/sve/acle/general-c++/mangle_3.C | 4 +-- .../aarch64/sve/acle/general-c++/mangle_5.C | 4 +-- 5 files changed, 31 insertions(+), 31 deletions(-) diff --git a/gcc/config/aarch64/aarch64-sve-builtins.cc b/gcc/config/aarch64/aarch64-sve-builtins.cc index bdb04e8170d..820cc9f7e17 100644 --- a/gcc/config/aarch64/aarch64-sve-builtins.cc +++ b/gcc/config/aarch64/aarch64-sve-builtins.cc @@ -101,7 +101,7 @@ struct registered_function_hasher : nofree_ptr_hash <registered_function> /* Information about each single-predicate or single-vector type. */ static CONSTEXPR const vector_type_info vector_types[] = { #define DEF_SVE_TYPE(ACLE_NAME, NCHARS, ABI_NAME, SCALAR_TYPE) \ - { #ACLE_NAME, #ABI_NAME, #NCHARS #ABI_NAME }, + { #ACLE_NAME, #ABI_NAME, "u" #NCHARS #ABI_NAME }, #include "aarch64-sve-builtins.def" }; diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_1.C b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_1.C index 1a171248585..36dab3c9b71 100644 --- a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_1.C +++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_1.C @@ -16,16 +16,16 @@ void f11(svfloat32_t) {} void f12(svfloat64_t) {} void f13(svbfloat16_t) {} -/* { dg-final { scan-assembler "_Z2f110__SVBool_t:" } } */ -/* { dg-final { scan-assembler "_Z2f210__SVInt8_t:" } } */ -/* { dg-final { scan-assembler "_Z2f311__SVInt16_t:" } } */ -/* { dg-final { scan-assembler "_Z2f411__SVInt32_t:" } } */ -/* { dg-final { scan-assembler "_Z2f511__SVInt64_t:" } } */ -/* { dg-final { scan-assembler "_Z2f611__SVUint8_t:" } } */ -/* { dg-final { scan-assembler "_Z2f712__SVUint16_t:" } } */ -/* { dg-final { scan-assembler "_Z2f812__SVUint32_t:" } } */ -/* { dg-final { scan-assembler "_Z2f912__SVUint64_t:" } } */ -/* { dg-final { scan-assembler "_Z3f1013__SVFloat16_t:" } } */ -/* { dg-final { scan-assembler "_Z3f1113__SVFloat32_t:" } } */ -/* { dg-final { scan-assembler "_Z3f1213__SVFloat64_t:" } } */ -/* { dg-final { scan-assembler "_Z3f1314__SVBfloat16_t:" } } */ +/* { dg-final { scan-assembler "_Z2f1u10__SVBool_t:" } } */ +/* { dg-final { scan-assembler "_Z2f2u10__SVInt8_t:" } } */ +/* { dg-final { scan-assembler "_Z2f3u11__SVInt16_t:" } } */ +/* { dg-final { scan-assembler "_Z2f4u11__SVInt32_t:" } } */ +/* { dg-final { scan-assembler "_Z2f5u11__SVInt64_t:" } } */ +/* { dg-final { scan-assembler 
"_Z2f6u11__SVUint8_t:" } } */ +/* { dg-final { scan-assembler "_Z2f7u12__SVUint16_t:" } } */ +/* { dg-final { scan-assembler "_Z2f8u12__SVUint32_t:" } } */ +/* { dg-final { scan-assembler "_Z2f9u12__SVUint64_t:" } } */ +/* { dg-final { scan-assembler "_Z3f10u13__SVFloat16_t:" } } */ +/* { dg-final { scan-assembler "_Z3f11u13__SVFloat32_t:" } } */ +/* { dg-final { scan-assembler "_Z3f12u13__SVFloat64_t:" } } */ +/* { dg-final { scan-assembler "_Z3f13u14__SVBfloat16_t:" } } */ diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_2.C b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_2.C index 6792b8a3133..ad4aaee291f 100644 --- a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_2.C +++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_2.C @@ -14,16 +14,16 @@ void f11(__SVFloat32_t) {} void f12(__SVFloat64_t) {} void f13(__SVBfloat16_t) {} -/* { dg-final { scan-assembler "_Z2f110__SVBool_t:" } } */ -/* { dg-final { scan-assembler "_Z2f210__SVInt8_t:" } } */ -/* { dg-final { scan-assembler "_Z2f311__SVInt16_t:" } } */ -/* { dg-final { scan-assembler "_Z2f411__SVInt32_t:" } } */ -/* { dg-final { scan-assembler "_Z2f511__SVInt64_t:" } } */ -/* { dg-final { scan-assembler "_Z2f611__SVUint8_t:" } } */ -/* { dg-final { scan-assembler "_Z2f712__SVUint16_t:" } } */ -/* { dg-final { scan-assembler "_Z2f812__SVUint32_t:" } } */ -/* { dg-final { scan-assembler "_Z2f912__SVUint64_t:" } } */ -/* { dg-final { scan-assembler "_Z3f1013__SVFloat16_t:" } } */ -/* { dg-final { scan-assembler "_Z3f1113__SVFloat32_t:" } } */ -/* { dg-final { scan-assembler "_Z3f1213__SVFloat64_t:" } } */ -/* { dg-final { scan-assembler "_Z3f1314__SVBfloat16_t:" } } */ +/* { dg-final { scan-assembler "_Z2f1u10__SVBool_t:" } } */ +/* { dg-final { scan-assembler "_Z2f2u10__SVInt8_t:" } } */ +/* { dg-final { scan-assembler "_Z2f3u11__SVInt16_t:" } } */ +/* { dg-final { scan-assembler "_Z2f4u11__SVInt32_t:" } } */ +/* { dg-final { scan-assembler "_Z2f5u11__SVInt64_t:" } } */ +/* { dg-final { scan-assembler "_Z2f6u11__SVUint8_t:" } } */ +/* { dg-final { scan-assembler "_Z2f7u12__SVUint16_t:" } } */ +/* { dg-final { scan-assembler "_Z2f8u12__SVUint32_t:" } } */ +/* { dg-final { scan-assembler "_Z2f9u12__SVUint64_t:" } } */ +/* { dg-final { scan-assembler "_Z3f10u13__SVFloat16_t:" } } */ +/* { dg-final { scan-assembler "_Z3f11u13__SVFloat32_t:" } } */ +/* { dg-final { scan-assembler "_Z3f12u13__SVFloat64_t:" } } */ +/* { dg-final { scan-assembler "_Z3f13u14__SVBfloat16_t:" } } */ diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_3.C b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_3.C index 8f64f7c2ee2..7aaafeb71eb 100644 --- a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_3.C +++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_3.C @@ -13,6 +13,6 @@ void f2(t2) {} void f3(t3) {} void f4(t1 &a, t2 &b, t3 &c) { a = b = c; } -/* { dg-final { scan-assembler "_Z2f110__SVInt8_t:" } } */ -/* { dg-final { scan-assembler "_Z2f210__SVInt8_t:" } } */ +/* { dg-final { scan-assembler "_Z2f1u10__SVInt8_t:" } } */ +/* { dg-final { scan-assembler "_Z2f2u10__SVInt8_t:" } } */ /* { dg-final { scan-assembler "_Z2f3Dv32_a:" } } */ diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_5.C b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_5.C index 47c1160d65a..1504cc12f41 100644 --- a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_5.C +++ 
b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_5.C @@ -4,5 +4,5 @@ typedef volatile foo bar; foo f (foo x) { return x; } bar g (bar x) { return x; } -/* { dg-final { scan-assembler {_Z1f10__SVInt8_t:\n} } } */ -/* { dg-final { scan-assembler {_Z1g10__SVInt8_t:\n} } } */ +/* { dg-final { scan-assembler {_Z1fu10__SVInt8_t:\n} } } */ +/* { dg-final { scan-assembler {_Z1gu10__SVInt8_t:\n} } } */ -- 2.17.1
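For anyone wanting to see the effect of patch 1 outside the testsuite, here is a minimal example of my own (not part of the patch; the expected manglings follow the updated tests above):

    #include <arm_sve.h>

    // With the patch these mangle as _Z1fu10__SVBool_t and
    // _Z1gu10__SVInt8_t; the leading "u" puts the names in the
    // vendor built-in namespace.  The old scheme omitted it
    // (_Z1f10__SVBool_t, _Z1g10__SVInt8_t).
    void f (svbool_t x) {}
    void g (svint8_t x) {}

Compiling with -S and inspecting the function labels shows the new form.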
>From b23cece47481f7a2c7577b0a8fd6207baf809f9d Mon Sep 17 00:00:00 2001 From: Richard Sandiford <richard.sandif...@arm.com> Date: Tue, 29 Sep 2020 11:22:04 +0100 Subject: [PATCH 2/5] aarch64: Tweaks to the handling of fixed-length SVE types This patch is really four things rolled into one, since separating them seemed artificial: - Update the mangling of the fixed-length SVE ACLE types to match the upcoming spec. The idea is to mangle: VLAT __attribute__((arm_sve_vector_bits(N))) as an instance __SVE_VLS<VLAT, N> of the template: __SVE_VLS<typename, unsigned> - Give the fixed-length types their own TYPE_DECL. This is needed to make the above mangling fix work, but should also be a minor QoI improvement for error reporting. Unfortunately, the names are quite verbose, e.g.: svint8_t __attribute__((arm_sve_vector_bits(512))) but anything shorter would be ad-hoc syntax and so might be more confusing. - Improve the error message reported when arm_sve_vector_bits is applied to tuples, such as: svint32x2_t __attribute__((arm_sve_vector_bits(N))) Previously we would complain that the type isn't an SVE type; now we complain that it isn't a vector type. - Don't allow arm_sve_vector_bits(N) to be applied to existing fixed-length SVE types. gcc/ * config/aarch64/aarch64-sve-builtins.cc (add_sve_type_attribute): Take the ACLE name of the type as a parameter and add it as fourth argument to the "SVE type" attribute. (register_builtin_types): Update call accordingly. (register_tuple_type): Likewise. Construct the name of the type earlier in order to do this. (get_arm_sve_vector_bits_attributes): New function. (handle_arm_sve_vector_bits_attribute): Report a more sensible error message if the attribute is applied to an SVE tuple type. Don't allow the attribute to be applied to an existing fixed-length SVE type. Mangle the new type as __SVE_VLS<type, vector-bits>. Add a dummy TYPE_DECL to the new type. gcc/testsuite/ * g++.target/aarch64/sve/acle/general-c++/attributes_2.C: New test. * g++.target/aarch64/sve/acle/general-c++/mangle_6.C: Likewise. * g++.target/aarch64/sve/acle/general-c++/mangle_7.C: Likewise. * g++.target/aarch64/sve/acle/general-c++/mangle_8.C: Likewise. * g++.target/aarch64/sve/acle/general-c++/mangle_9.C: Likewise. * g++.target/aarch64/sve/acle/general-c++/mangle_10.C: Likewise. * gcc.target/aarch64/sve/acle/general/attributes_7.c: Check the error messages reported when arm_sve_vector_bits is applied to SVE tuple types or to existing fixed-length SVE types. 
(cherry picked from commit 9ded41a39c1bb29f356485a9ec3a573fb75ded12) --- gcc/config/aarch64/aarch64-sve-builtins.cc | 130 +++++++++++++++--- .../sve/acle/general-c++/attributes_2.C | 66 +++++++++ .../aarch64/sve/acle/general-c++/mangle_10.C | 19 +++ .../aarch64/sve/acle/general-c++/mangle_6.C | 36 +++++ .../aarch64/sve/acle/general-c++/mangle_7.C | 19 +++ .../aarch64/sve/acle/general-c++/mangle_8.C | 19 +++ .../aarch64/sve/acle/general-c++/mangle_9.C | 19 +++ .../aarch64/sve/acle/general/attributes_7.c | 4 + 8 files changed, 295 insertions(+), 17 deletions(-) create mode 100644 gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/attributes_2.C create mode 100644 gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_10.C create mode 100644 gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_6.C create mode 100644 gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_7.C create mode 100644 gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_8.C create mode 100644 gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_9.C diff --git a/gcc/config/aarch64/aarch64-sve-builtins.cc b/gcc/config/aarch64/aarch64-sve-builtins.cc index 820cc9f7e17..4473f26a651 100644 --- a/gcc/config/aarch64/aarch64-sve-builtins.cc +++ b/gcc/config/aarch64/aarch64-sve-builtins.cc @@ -564,15 +564,16 @@ static bool reported_missing_registers_p; /* Record that TYPE is an ABI-defined SVE type that contains NUM_ZR SVE vectors and NUM_PR SVE predicates. MANGLED_NAME, if nonnull, is the ABI-defined - mangling of the type. */ + mangling of the type. ACLE_NAME is the <arm_sve.h> name of the type. */ static void add_sve_type_attribute (tree type, unsigned int num_zr, unsigned int num_pr, - const char *mangled_name) + const char *mangled_name, const char *acle_name) { tree mangled_name_tree = (mangled_name ? get_identifier (mangled_name) : NULL_TREE); - tree value = tree_cons (NULL_TREE, mangled_name_tree, NULL_TREE); + tree value = tree_cons (NULL_TREE, get_identifier (acle_name), NULL_TREE); + value = tree_cons (NULL_TREE, mangled_name_tree, value); value = tree_cons (NULL_TREE, size_int (num_pr), value); value = tree_cons (NULL_TREE, size_int (num_zr), value); TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("SVE type"), value, @@ -3359,7 +3360,8 @@ register_builtin_types () TYPE_ARTIFICIAL (vectype) = 1; TYPE_INDIVISIBLE_P (vectype) = 1; add_sve_type_attribute (vectype, num_zr, num_pr, - vector_types[i].mangled_name); + vector_types[i].mangled_name, + vector_types[i].acle_name); make_type_sizeless (vectype); abi_vector_types[i] = vectype; lang_hooks.types.register_builtin_type (vectype, @@ -3405,6 +3407,13 @@ register_tuple_type (unsigned int num_vectors, vector_type_index type) { tree tuple_type = lang_hooks.types.make_type (RECORD_TYPE); + /* Work out the structure name. */ + char buffer[sizeof ("svbfloat16x4_t")]; + const char *vector_type_name = vector_types[type].acle_name; + snprintf (buffer, sizeof (buffer), "%.*sx%d_t", + (int) strlen (vector_type_name) - 2, vector_type_name, + num_vectors); + /* The contents of the type are opaque, so we can define them in any way that maps to the correct ABI type. 
@@ -3428,20 +3437,13 @@ register_tuple_type (unsigned int num_vectors, vector_type_index type) get_identifier ("__val"), array_type); DECL_FIELD_CONTEXT (field) = tuple_type; TYPE_FIELDS (tuple_type) = field; - add_sve_type_attribute (tuple_type, num_vectors, 0, NULL); + add_sve_type_attribute (tuple_type, num_vectors, 0, NULL, buffer); make_type_sizeless (tuple_type); layout_type (tuple_type); gcc_assert (VECTOR_MODE_P (TYPE_MODE (tuple_type)) && TYPE_MODE_RAW (tuple_type) == TYPE_MODE (tuple_type) && TYPE_ALIGN (tuple_type) == 128); - /* Work out the structure name. */ - char buffer[sizeof ("svbfloat16x4_t")]; - const char *vector_type_name = vector_types[type].acle_name; - snprintf (buffer, sizeof (buffer), "%.*sx%d_t", - (int) strlen (vector_type_name) - 2, vector_type_name, - num_vectors); - tree decl = build_decl (input_location, TYPE_DECL, get_identifier (buffer), tuple_type); TYPE_NAME (tuple_type) = decl; @@ -3642,6 +3644,29 @@ builtin_type_p (const_tree type, unsigned int *num_zr, unsigned int *num_pr) return false; } +/* ATTRS is the attribute list for a sizeless SVE type. Return the + attributes of the associated fixed-length SVE type, taking the + "SVE type" attributes from NEW_SVE_TYPE_ARGS. */ +static tree +get_arm_sve_vector_bits_attributes (tree old_attrs, tree new_sve_type_args) +{ + tree new_attrs = NULL_TREE; + tree *ptr = &new_attrs; + for (tree attr = old_attrs; attr; attr = TREE_CHAIN (attr)) + { + tree name = get_attribute_name (attr); + if (is_attribute_p ("SVE sizeless type", name)) + continue; + + tree args = TREE_VALUE (attr); + if (is_attribute_p ("SVE type", name)) + args = new_sve_type_args; + *ptr = tree_cons (TREE_PURPOSE (attr), args, NULL_TREE); + ptr = &TREE_CHAIN (*ptr); + } + return new_attrs; +} + /* An attribute callback for the "arm_sve_vector_bits" attribute. */ tree handle_arm_sve_vector_bits_attribute (tree *node, tree, tree args, int, @@ -3650,12 +3675,27 @@ handle_arm_sve_vector_bits_attribute (tree *node, tree, tree args, int, *no_add_attrs = true; tree type = *node; - if (!VECTOR_TYPE_P (type) || !builtin_type_p (type)) + tree attr = lookup_sve_type_attribute (type); + if (!attr) { error ("%qs applied to non-SVE type %qT", "arm_sve_vector_bits", type); return NULL_TREE; } + if (!VECTOR_TYPE_P (type)) + { + error ("%qs applied to non-vector type %qT", + "arm_sve_vector_bits", type); + return NULL_TREE; + } + + if (!sizeless_type_p (type)) + { + error ("%qs applied to type %qT, which already has a size", + "arm_sve_vector_bits", type); + return NULL_TREE; + } + tree size = TREE_VALUE (args); if (TREE_CODE (size) != INTEGER_CST) { @@ -3671,6 +3711,23 @@ handle_arm_sve_vector_bits_attribute (tree *node, tree, tree args, int, return NULL_TREE; } + /* Construct a new list of "SVE type" attribute arguments. */ + tree new_sve_type_args = copy_list (TREE_VALUE (attr)); + + /* Mangle the type as an instance of the imaginary template: + + __SVE_VLS<typename, unsigned> + + where the first parameter is the SVE type and where the second + parameter is the SVE vector length in bits. 
*/ + tree mangled_name_node = chain_index (2, new_sve_type_args); + const char *old_mangled_name + = IDENTIFIER_POINTER (TREE_VALUE (mangled_name_node)); + char *new_mangled_name + = xasprintf ("9__SVE_VLSI%sLj%dEE", old_mangled_name, (int) value); + TREE_VALUE (mangled_name_node) = get_identifier (new_mangled_name); + free (new_mangled_name); + /* FIXME: The type ought to be a distinct copy in all cases, but currently that makes the C frontend reject conversions between svbool_t and its fixed-length variants. Using a type variant @@ -3683,6 +3740,44 @@ handle_arm_sve_vector_bits_attribute (tree *node, tree, tree args, int, else new_type = build_distinct_type_copy (base_type); + /* Construct a TYPE_DECL for the new type. This serves two purposes: + + - It ensures we don't print the original TYPE_DECL in error messages. + Printing the original name would be confusing because there are + situations in which the distinction between the original type and + the new type matters. For example: + + __SVInt8_t __attribute__((arm_sve_vector_bits(512))) *a; + __SVInt8_t *b; + + a = b; + + is invalid in C++, but without this, we'd print both types in + the same way. + + - Having a separate TYPE_DECL is necessary to ensure that C++ + mangling works correctly. See mangle_builtin_type for details. + + The name of the decl is something like: + + svint8_t __attribute__((arm_sve_vector_bits(512))) + + This is a compromise. It would be more accurate to use something like: + + __SVInt8_t __attribute__((arm_sve_vector_bits(512))) + + but the <arm_sve.h> name is likely to be more meaningful. */ + tree acle_name_node = TREE_CHAIN (mangled_name_node); + const char *old_type_name = IDENTIFIER_POINTER (TREE_VALUE (acle_name_node)); + char *new_type_name + = xasprintf ("%s __attribute__((arm_sve_vector_bits(%d)))", + old_type_name, (int) value); + tree decl = build_decl (BUILTINS_LOCATION, TYPE_DECL, + get_identifier (new_type_name), new_type); + DECL_ARTIFICIAL (decl) = 1; + TYPE_NAME (new_type) = decl; + free (new_type_name); + /* Allow the GNU vector extensions to be applied to vectors. The extensions aren't yet defined for packed predicates, so continue to treat them as abstract entities for now. */ @@ -3692,16 +3787,17 @@ handle_arm_sve_vector_bits_attribute (tree *node, tree, tree args, int, /* The new type is a normal sized type; it doesn't have the same restrictions as sizeless types. */ TYPE_ATTRIBUTES (new_type) - = remove_attribute ("SVE sizeless type", - copy_list (TYPE_ATTRIBUTES (new_type))); + = get_arm_sve_vector_bits_attributes (TYPE_ATTRIBUTES (new_type), + new_sve_type_args); /* Apply the relevant attributes, qualifiers and alignment of TYPE, if they differ from the original (sizeless) BASE_TYPE. 
*/ if (TYPE_ATTRIBUTES (base_type) != TYPE_ATTRIBUTES (type) || TYPE_QUALS (base_type) != TYPE_QUALS (type)) { - tree attrs = remove_attribute ("SVE sizeless type", - copy_list (TYPE_ATTRIBUTES (type))); + tree attrs + = get_arm_sve_vector_bits_attributes (TYPE_ATTRIBUTES (type), + new_sve_type_args); new_type = build_type_attribute_qual_variant (new_type, attrs, TYPE_QUALS (type)); } diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/attributes_2.C b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/attributes_2.C new file mode 100644 index 00000000000..b55be02ecca --- /dev/null +++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/attributes_2.C @@ -0,0 +1,66 @@ +// { dg-compile } +// { dg-additional-options "-msve-vector-bits=512" } + +#include <arm_sve.h> + +typedef svint8_t vec8 __attribute__((arm_sve_vector_bits(512))); +typedef vec8 *vec8_ptr; + +typedef svint8_t my_vec; + +typedef vec8 bad_vec8_a __attribute__((arm_sve_vector_bits(512))); // { dg-error {'arm_sve_vector_bits' applied to type 'vec8' {aka 'svint8_t __attribute__\(\(arm_sve_vector_bits\(512\)\)\)'}, which already has a size} } +typedef svint8_t bad_vec8_b __attribute__((arm_sve_vector_bits(512))) __attribute__((arm_sve_vector_bits(512))); // { dg-error {'arm_sve_vector_bits' applied to type 'svint8_t __attribute__\(\(arm_sve_vector_bits\(512\)\)\)', which already has a size} } + +svint8_t *vla1; +__SVInt8_t *vla2; + +vec8 *vls1; +svint8_t (__attribute__((arm_sve_vector_bits(512))) *vls2); +__SVInt8_t (__attribute__((arm_sve_vector_bits(512))) *vls3); +vec8_ptr vls4; +my_vec (__attribute__((arm_sve_vector_bits(512))) *vls5); + +void +f (void) +{ + vls1 = vla1; // { dg-error {invalid conversion from 'svint8_t\*' to 'vec8\*' {aka 'svint8_t __attribute__\(\(arm_sve_vector_bits\(512\)\)\)\*'}} } + vls1 = vla2; // { dg-error {invalid conversion from '__SVInt8_t\*' to 'vec8\*' {aka 'svint8_t __attribute__\(\(arm_sve_vector_bits\(512\)\)\)\*'}} } + + vls2 = vla1; // { dg-error {invalid conversion from 'svint8_t\*' to 'svint8_t __attribute__\(\(arm_sve_vector_bits\(512\)\)\)\*'} } + vls2 = vla2; // { dg-error {invalid conversion from '__SVInt8_t\*' to 'svint8_t __attribute__\(\(arm_sve_vector_bits\(512\)\)\)\*'} } + + vls3 = vla1; // { dg-error {invalid conversion from 'svint8_t\*' to 'svint8_t __attribute__\(\(arm_sve_vector_bits\(512\)\)\)\*'} } + vls3 = vla2; // { dg-error {invalid conversion from '__SVInt8_t\*' to 'svint8_t __attribute__\(\(arm_sve_vector_bits\(512\)\)\)\*'} } + + vls4 = vla1; // { dg-error {invalid conversion from 'svint8_t\*' to 'vec8_ptr' {aka 'svint8_t __attribute__\(\(arm_sve_vector_bits\(512\)\)\)\*'}} } + vls4 = vla2; // { dg-error {invalid conversion from '__SVInt8_t\*' to 'vec8_ptr' {aka 'svint8_t __attribute__\(\(arm_sve_vector_bits\(512\)\)\)\*'}} } + + vls5 = vla1; // { dg-error {invalid conversion from 'svint8_t\*' to 'svint8_t __attribute__\(\(arm_sve_vector_bits\(512\)\)\)\*'} } + vls5 = vla2; // { dg-error {invalid conversion from '__SVInt8_t\*' to 'svint8_t __attribute__\(\(arm_sve_vector_bits\(512\)\)\)\*'} } + + vla1 = vla1; + vla1 = vla2; + + vla2 = vla1; + vla2 = vla2; + + vls1 = vls1; + vls1 = vls2; + vls1 = vls3; + vls1 = vls4; + + vls2 = vls1; + vls2 = vls2; + vls2 = vls3; + vls2 = vls4; + + vls3 = vls1; + vls3 = vls2; + vls3 = vls3; + vls3 = vls4; + + vls4 = vls1; + vls4 = vls2; + vls4 = vls3; + vls4 = vls4; +} diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_10.C b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_10.C 
new file mode 100644 index 00000000000..5ff2d2e5522 --- /dev/null +++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_10.C @@ -0,0 +1,19 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target aarch64_little_endian } */ +/* { dg-additional-options "-msve-vector-bits=2048" } */ + +#include "mangle_6.C" + +/* { dg-final { scan-assembler "_Z2f19__SVE_VLSIu10__SVBool_tLj2048EE:" } } */ +/* { dg-final { scan-assembler "_Z2f29__SVE_VLSIu10__SVInt8_tLj2048EE:" } } */ +/* { dg-final { scan-assembler "_Z2f39__SVE_VLSIu11__SVInt16_tLj2048EE:" } } */ +/* { dg-final { scan-assembler "_Z2f49__SVE_VLSIu11__SVInt32_tLj2048EE:" } } */ +/* { dg-final { scan-assembler "_Z2f59__SVE_VLSIu11__SVInt64_tLj2048EE:" } } */ +/* { dg-final { scan-assembler "_Z2f69__SVE_VLSIu11__SVUint8_tLj2048EE:" } } */ +/* { dg-final { scan-assembler "_Z2f79__SVE_VLSIu12__SVUint16_tLj2048EE:" } } */ +/* { dg-final { scan-assembler "_Z2f89__SVE_VLSIu12__SVUint32_tLj2048EE:" } } */ +/* { dg-final { scan-assembler "_Z2f99__SVE_VLSIu12__SVUint64_tLj2048EE:" } } */ +/* { dg-final { scan-assembler "_Z3f109__SVE_VLSIu13__SVFloat16_tLj2048EE:" } } */ +/* { dg-final { scan-assembler "_Z3f119__SVE_VLSIu13__SVFloat32_tLj2048EE:" } } */ +/* { dg-final { scan-assembler "_Z3f129__SVE_VLSIu13__SVFloat64_tLj2048EE:" } } */ +/* { dg-final { scan-assembler "_Z3f139__SVE_VLSIu14__SVBfloat16_tLj2048EE:" } } */ diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_6.C b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_6.C new file mode 100644 index 00000000000..50009b67f93 --- /dev/null +++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_6.C @@ -0,0 +1,36 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target aarch64_little_endian } */ +/* { dg-additional-options "-msve-vector-bits=128" } */ + +#include <arm_sve.h> + +#define N __ARM_FEATURE_SVE_BITS +#define FIXED_ATTR __attribute__ ((arm_sve_vector_bits (N))) + +void f1(svbool_t FIXED_ATTR) {} +void f2(svint8_t FIXED_ATTR) {} +void f3(svint16_t FIXED_ATTR) {} +void f4(svint32_t FIXED_ATTR) {} +void f5(svint64_t FIXED_ATTR) {} +void f6(svuint8_t FIXED_ATTR) {} +void f7(svuint16_t FIXED_ATTR) {} +void f8(svuint32_t FIXED_ATTR) {} +void f9(svuint64_t FIXED_ATTR) {} +void f10(svfloat16_t FIXED_ATTR) {} +void f11(svfloat32_t FIXED_ATTR) {} +void f12(svfloat64_t FIXED_ATTR) {} +void f13(svbfloat16_t FIXED_ATTR) {} + +/* { dg-final { scan-assembler "_Z2f19__SVE_VLSIu10__SVBool_tLj128EE:" } } */ +/* { dg-final { scan-assembler "_Z2f29__SVE_VLSIu10__SVInt8_tLj128EE:" } } */ +/* { dg-final { scan-assembler "_Z2f39__SVE_VLSIu11__SVInt16_tLj128EE:" } } */ +/* { dg-final { scan-assembler "_Z2f49__SVE_VLSIu11__SVInt32_tLj128EE:" } } */ +/* { dg-final { scan-assembler "_Z2f59__SVE_VLSIu11__SVInt64_tLj128EE:" } } */ +/* { dg-final { scan-assembler "_Z2f69__SVE_VLSIu11__SVUint8_tLj128EE:" } } */ +/* { dg-final { scan-assembler "_Z2f79__SVE_VLSIu12__SVUint16_tLj128EE:" } } */ +/* { dg-final { scan-assembler "_Z2f89__SVE_VLSIu12__SVUint32_tLj128EE:" } } */ +/* { dg-final { scan-assembler "_Z2f99__SVE_VLSIu12__SVUint64_tLj128EE:" } } */ +/* { dg-final { scan-assembler "_Z3f109__SVE_VLSIu13__SVFloat16_tLj128EE:" } } */ +/* { dg-final { scan-assembler "_Z3f119__SVE_VLSIu13__SVFloat32_tLj128EE:" } } */ +/* { dg-final { scan-assembler "_Z3f129__SVE_VLSIu13__SVFloat64_tLj128EE:" } } */ +/* { dg-final { scan-assembler "_Z3f139__SVE_VLSIu14__SVBfloat16_tLj128EE:" } } */ diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_7.C 
b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_7.C new file mode 100644 index 00000000000..45cc1d2f31e --- /dev/null +++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_7.C @@ -0,0 +1,19 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target aarch64_little_endian } */ +/* { dg-additional-options "-msve-vector-bits=256" } */ + +#include "mangle_6.C" + +/* { dg-final { scan-assembler "_Z2f19__SVE_VLSIu10__SVBool_tLj256EE:" } } */ +/* { dg-final { scan-assembler "_Z2f29__SVE_VLSIu10__SVInt8_tLj256EE:" } } */ +/* { dg-final { scan-assembler "_Z2f39__SVE_VLSIu11__SVInt16_tLj256EE:" } } */ +/* { dg-final { scan-assembler "_Z2f49__SVE_VLSIu11__SVInt32_tLj256EE:" } } */ +/* { dg-final { scan-assembler "_Z2f59__SVE_VLSIu11__SVInt64_tLj256EE:" } } */ +/* { dg-final { scan-assembler "_Z2f69__SVE_VLSIu11__SVUint8_tLj256EE:" } } */ +/* { dg-final { scan-assembler "_Z2f79__SVE_VLSIu12__SVUint16_tLj256EE:" } } */ +/* { dg-final { scan-assembler "_Z2f89__SVE_VLSIu12__SVUint32_tLj256EE:" } } */ +/* { dg-final { scan-assembler "_Z2f99__SVE_VLSIu12__SVUint64_tLj256EE:" } } */ +/* { dg-final { scan-assembler "_Z3f109__SVE_VLSIu13__SVFloat16_tLj256EE:" } } */ +/* { dg-final { scan-assembler "_Z3f119__SVE_VLSIu13__SVFloat32_tLj256EE:" } } */ +/* { dg-final { scan-assembler "_Z3f129__SVE_VLSIu13__SVFloat64_tLj256EE:" } } */ +/* { dg-final { scan-assembler "_Z3f139__SVE_VLSIu14__SVBfloat16_tLj256EE:" } } */ diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_8.C b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_8.C new file mode 100644 index 00000000000..96e03c577cc --- /dev/null +++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_8.C @@ -0,0 +1,19 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target aarch64_little_endian } */ +/* { dg-additional-options "-msve-vector-bits=512" } */ + +#include "mangle_6.C" + +/* { dg-final { scan-assembler "_Z2f19__SVE_VLSIu10__SVBool_tLj512EE:" } } */ +/* { dg-final { scan-assembler "_Z2f29__SVE_VLSIu10__SVInt8_tLj512EE:" } } */ +/* { dg-final { scan-assembler "_Z2f39__SVE_VLSIu11__SVInt16_tLj512EE:" } } */ +/* { dg-final { scan-assembler "_Z2f49__SVE_VLSIu11__SVInt32_tLj512EE:" } } */ +/* { dg-final { scan-assembler "_Z2f59__SVE_VLSIu11__SVInt64_tLj512EE:" } } */ +/* { dg-final { scan-assembler "_Z2f69__SVE_VLSIu11__SVUint8_tLj512EE:" } } */ +/* { dg-final { scan-assembler "_Z2f79__SVE_VLSIu12__SVUint16_tLj512EE:" } } */ +/* { dg-final { scan-assembler "_Z2f89__SVE_VLSIu12__SVUint32_tLj512EE:" } } */ +/* { dg-final { scan-assembler "_Z2f99__SVE_VLSIu12__SVUint64_tLj512EE:" } } */ +/* { dg-final { scan-assembler "_Z3f109__SVE_VLSIu13__SVFloat16_tLj512EE:" } } */ +/* { dg-final { scan-assembler "_Z3f119__SVE_VLSIu13__SVFloat32_tLj512EE:" } } */ +/* { dg-final { scan-assembler "_Z3f129__SVE_VLSIu13__SVFloat64_tLj512EE:" } } */ +/* { dg-final { scan-assembler "_Z3f139__SVE_VLSIu14__SVBfloat16_tLj512EE:" } } */ diff --git a/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_9.C b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_9.C new file mode 100644 index 00000000000..3f432af469a --- /dev/null +++ b/gcc/testsuite/g++.target/aarch64/sve/acle/general-c++/mangle_9.C @@ -0,0 +1,19 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target aarch64_little_endian } */ +/* { dg-additional-options "-msve-vector-bits=1024" } */ + +#include "mangle_6.C" + +/* { dg-final { scan-assembler "_Z2f19__SVE_VLSIu10__SVBool_tLj1024EE:" } } */ +/* { dg-final { scan-assembler 
"_Z2f29__SVE_VLSIu10__SVInt8_tLj1024EE:" } } */ +/* { dg-final { scan-assembler "_Z2f39__SVE_VLSIu11__SVInt16_tLj1024EE:" } } */ +/* { dg-final { scan-assembler "_Z2f49__SVE_VLSIu11__SVInt32_tLj1024EE:" } } */ +/* { dg-final { scan-assembler "_Z2f59__SVE_VLSIu11__SVInt64_tLj1024EE:" } } */ +/* { dg-final { scan-assembler "_Z2f69__SVE_VLSIu11__SVUint8_tLj1024EE:" } } */ +/* { dg-final { scan-assembler "_Z2f79__SVE_VLSIu12__SVUint16_tLj1024EE:" } } */ +/* { dg-final { scan-assembler "_Z2f89__SVE_VLSIu12__SVUint32_tLj1024EE:" } } */ +/* { dg-final { scan-assembler "_Z2f99__SVE_VLSIu12__SVUint64_tLj1024EE:" } } */ +/* { dg-final { scan-assembler "_Z3f109__SVE_VLSIu13__SVFloat16_tLj1024EE:" } } */ +/* { dg-final { scan-assembler "_Z3f119__SVE_VLSIu13__SVFloat32_tLj1024EE:" } } */ +/* { dg-final { scan-assembler "_Z3f129__SVE_VLSIu13__SVFloat64_tLj1024EE:" } } */ +/* { dg-final { scan-assembler "_Z3f139__SVE_VLSIu14__SVBfloat16_tLj1024EE:" } } */ diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_7.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_7.c index 55d9deace0c..e2e74700a01 100644 --- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_7.c +++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_7.c @@ -23,6 +23,10 @@ typedef svbool_t bad_type_2 __attribute__ ((arm_sve_vector_bits)); // { dg-error typedef svbool_t bad_type_3 __attribute__ ((arm_sve_vector_bits (N, N))); // { dg-error {wrong number of arguments specified for 'arm_sve_vector_bits' attribute} } typedef svbool_t bad_type_4 __attribute__ ((arm_sve_vector_bits ("256"))); // { dg-error {'arm_sve_vector_bits' requires an integer constant expression} } typedef svbool_t bad_type_5 __attribute__ ((arm_sve_vector_bits (100))); // { dg-warning {unsupported SVE vector size} } +typedef svint32x2_t bad_type_6 __attribute__ ((arm_sve_vector_bits (N))); // { dg-error {'arm_sve_vector_bits' applied to non-vector type 'svint32x2_t'} } +typedef svint8_t bad_type_7 __attribute__ ((arm_sve_vector_bits (N))) __attribute__ ((arm_sve_vector_bits (N))); // { dg-error {'arm_sve_vector_bits' applied to type 'svint8_t __attribute__\(\(arm_sve_vector_bits\([0-9]+\)\)\)', which already has a size} } +typedef fixed_bool_t bad_type_8 __attribute__ ((arm_sve_vector_bits (N))) __attribute__ ((arm_sve_vector_bits (N))); // { dg-error {'arm_sve_vector_bits' applied to type 'fixed_bool_t' {aka 'svbool_t __attribute__\(\(arm_sve_vector_bits\([0-9]+\)\)\)'}, which already has a size} } +typedef gnu_int8_t bad_type_9 __attribute__ ((arm_sve_vector_bits (N))) __attribute__ ((arm_sve_vector_bits (N))); // { dg-error {'arm_sve_vector_bits' applied to non-SVE type 'gnu_int8_t'} } void f (int c) -- 2.17.1
>From 72f2f177a17c2c1d5b09ca3c524fa22ef251b934 Mon Sep 17 00:00:00 2001 From: Richard Sandiford <richard.sandif...@arm.com> Date: Tue, 29 Sep 2020 11:22:05 +0100 Subject: [PATCH 3/5] aarch64: Update feature macro name GCC used the name __ARM_FEATURE_SVE_VECTOR_OPERATIONS, but in the final spec it was renamed to__ARM_FEATURE_SVE_VECTOR_OPERATORS. gcc/ * config/aarch64/aarch64-c.c (aarch64_update_cpp_builtins): Rename __ARM_FEATURE_SVE_VECTOR_OPERATIONS to __ARM_FEATURE_SVE_VECTOR_OPERATORS. gcc/testsuite/ * gcc.target/aarch64/sve/acle/general/attributes_1.c: Rename __ARM_FEATURE_SVE_VECTOR_OPERATIONS to __ARM_FEATURE_SVE_VECTOR_OPERATORS. (cherry picked from commit ef4af9eddea5a658eb7d6dc29fcb58aa54c9dd9f) --- gcc/config/aarch64/aarch64-c.c | 2 +- .../gcc.target/aarch64/sve/acle/general/attributes_1.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/gcc/config/aarch64/aarch64-c.c b/gcc/config/aarch64/aarch64-c.c index 1a1f4ecef04..fd08be47570 100644 --- a/gcc/config/aarch64/aarch64-c.c +++ b/gcc/config/aarch64/aarch64-c.c @@ -149,7 +149,7 @@ aarch64_update_cpp_builtins (cpp_reader *pfile) bits = 0; builtin_define_with_int_value ("__ARM_FEATURE_SVE_BITS", bits); } - aarch64_def_or_undef (TARGET_SVE, "__ARM_FEATURE_SVE_VECTOR_OPERATIONS", + aarch64_def_or_undef (TARGET_SVE, "__ARM_FEATURE_SVE_VECTOR_OPERATORS", pfile); aarch64_def_or_undef (TARGET_SVE_I8MM, "__ARM_FEATURE_SVE_MATMUL_INT8", pfile); diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_1.c index 6cd4f99911e..17acfc32e78 100644 --- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_1.c +++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/attributes_1.c @@ -6,8 +6,8 @@ #error "__ARM_FEATURE_SVE_BITS is not defined but should be" #endif -#if __ARM_FEATURE_SVE_VECTOR_OPERATIONS != 1 -#error "__ARM_FEATURE_SVE_VECTOR_OPERATIONS should be equal to 1" +#if __ARM_FEATURE_SVE_VECTOR_OPERATORS != 1 +#error "__ARM_FEATURE_SVE_VECTOR_OPERATORS should be equal to 1" #endif #ifndef __cplusplus -- 2.17.1
>From a53ad13e710ac48e9ac0e93f63a5c3d1617f1557 Mon Sep 17 00:00:00 2001 From: Richard Sandiford <richard.sandif...@arm.com> Date: Tue, 29 Sep 2020 11:22:06 +0100 Subject: [PATCH 4/5] aarch64: Prevent canary address being spilled to stack This patch fixes the equivalent of arm bug PR85434/CVE-2018-12886 for aarch64: under high register pressure, the -fstack-protector code might spill the address of the canary onto the stack and reload it at the test site, giving an attacker the opportunity to change the expected canary value. This would happen in two cases: - when generating PIC for -mstack-protector-guard=global (tested by stack-protector-6.c). This is a direct analogue of PR85434, which was also about PIC for the global case. - when using -mstack-protector-guard=sysreg. The two problems were really separate bugs and caused by separate code, but it was more convenient to fix them together. The post-patch code still spills _GLOBAL_OFFSET_TABLE_ for stack-protector-6.c, which is a more general problem. However, it no longer spills the canary address itself. The patch also fixes an ICE when using -mstack-protector-guard=sysreg with ILP32: even if the register read is SImode, the address calculation itself should still be DImode. gcc/ * config/aarch64/aarch64-protos.h (aarch64_salt_type): New enum. (aarch64_stack_protect_canary_mem): Declare. * config/aarch64/aarch64.md (UNSPEC_SALT_ADDR): New unspec. (stack_protect_set): Forward to stack_protect_combined_set. (stack_protect_combined_set): New pattern. Use aarch64_stack_protect_canary_mem. (reg_stack_protect_address_<mode>): Add a salt operand. (stack_protect_test): Forward to stack_protect_combined_test. (stack_protect_combined_test): New pattern. Use aarch64_stack_protect_canary_mem. * config/aarch64/aarch64.c (strip_salt): New function. (strip_offset_and_salt): Likewise. (tls_symbolic_operand_type): Use strip_offset_and_salt. (aarch64_stack_protect_canary_mem): New function. (aarch64_cannot_force_const_mem): Use strip_offset_and_salt. (aarch64_classify_address): Likewise. (aarch64_symbolic_address_p): Likewise. (aarch64_print_operand): Likewise. (aarch64_output_addr_const_extra): New function. (aarch64_tls_symbol_p): Use strip_salt. (aarch64_classify_symbol): Likewise. (aarch64_legitimate_pic_operand_p): Use strip_offset_and_salt. (aarch64_legitimate_constant_p): Likewise. (aarch64_mov_operand_p): Use strip_salt. (TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA): Override. gcc/testsuite/ * gcc.target/aarch64/stack-protector-5.c: New test. * gcc.target/aarch64/stack-protector-6.c: Likewise. * gcc.target/aarch64/stack-protector-7.c: Likewise. 
(cherry picked from commit 74b27d8eedc7a4c0e8276345107790e6b3c023cb) --- gcc/config/aarch64/aarch64-protos.h | 20 +++ gcc/config/aarch64/aarch64.c | 164 +++++++++++++----- gcc/config/aarch64/aarch64.md | 85 ++++----- .../gcc.target/aarch64/stack-protector-5.c | 23 +++ .../gcc.target/aarch64/stack-protector-6.c | 8 + .../gcc.target/aarch64/stack-protector-7.c | 25 +++ 6 files changed, 228 insertions(+), 97 deletions(-) create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-protector-5.c create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-protector-6.c create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-protector-7.c diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h index 839f801a31b..be220970c77 100644 --- a/gcc/config/aarch64/aarch64-protos.h +++ b/gcc/config/aarch64/aarch64-protos.h @@ -136,6 +136,25 @@ enum aarch64_addr_query_type { ADDR_QUERY_ANY }; +/* Enumerates values that can be arbitrarily mixed into a calculation + in order to make the result of the calculation unique to its use case. + + AARCH64_SALT_SSP_SET + AARCH64_SALT_SSP_TEST + Used when calculating the address of the stack protection canary value. + There is a separate value for setting and testing the canary, meaning + that these two operations produce unique addresses: they are different + from each other, and from all other address calculations. + + The main purpose of this is to prevent the SET address being spilled + to the stack and reloaded for the TEST, since that would give an + attacker the opportunity to change the address of the expected + canary value. */ +enum aarch64_salt_type { + AARCH64_SALT_SSP_SET, + AARCH64_SALT_SSP_TEST +}; + /* A set of tuning parameters contains references to size and time cost models and vectors for address cost calculations, register move costs and memory move costs. */ @@ -608,6 +627,7 @@ opt_machine_mode aarch64_ptrue_all_mode (rtx); rtx aarch64_convert_sve_data_to_pred (rtx, machine_mode, rtx); rtx aarch64_expand_sve_dupq (rtx, machine_mode, rtx); void aarch64_expand_mov_immediate (rtx, rtx); +rtx aarch64_stack_protect_canary_mem (machine_mode, rtx, aarch64_salt_type); rtx aarch64_ptrue_reg (machine_mode); rtx aarch64_pfalse_reg (machine_mode); bool aarch64_sve_pred_dominates_p (rtx *, rtx); diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index e1bba40f651..7d53072996d 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -1935,6 +1935,29 @@ aarch64_sve_abi (void) return sve_abi; } +/* If X is an UNSPEC_SALT_ADDR expression, return the address that it + wraps, otherwise return X itself. */ + +static rtx +strip_salt (rtx x) +{ + rtx search = x; + if (GET_CODE (search) == CONST) + search = XEXP (search, 0); + if (GET_CODE (search) == UNSPEC && XINT (search, 1) == UNSPEC_SALT_ADDR) + x = XVECEXP (search, 0, 0); + return x; +} + +/* Like strip_offset, but also strip any UNSPEC_SALT_ADDR from the + expression. */ + +static rtx +strip_offset_and_salt (rtx addr, poly_int64 *offset) +{ + return strip_salt (strip_offset (addr, offset)); +} + /* Generate code to enable conditional branches in functions over 1 MiB. 
*/ const char * aarch64_gen_far_branch (rtx * operands, int pos_label, const char * dest, @@ -2932,14 +2955,9 @@ static enum tls_model tls_symbolic_operand_type (rtx addr) { enum tls_model tls_kind = TLS_MODEL_NONE; - if (GET_CODE (addr) == CONST) - { - poly_int64 addend; - rtx sym = strip_offset (addr, &addend); - if (GET_CODE (sym) == SYMBOL_REF) - tls_kind = SYMBOL_REF_TLS_MODEL (sym); - } - else if (GET_CODE (addr) == SYMBOL_REF) + poly_int64 offset; + addr = strip_offset_and_salt (addr, &offset); + if (GET_CODE (addr) == SYMBOL_REF) tls_kind = SYMBOL_REF_TLS_MODEL (addr); return tls_kind; @@ -5239,6 +5257,48 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm) as_a <scalar_int_mode> (mode)); } +/* Return the MEM rtx that provides the canary value that should be used + for stack-smashing protection. MODE is the mode of the memory. + For SSP_GLOBAL, DECL_RTL is the MEM rtx for the canary variable + (__stack_chk_guard), otherwise it has no useful value. SALT_TYPE + indicates whether the caller is performing a SET or a TEST operation. */ + +rtx +aarch64_stack_protect_canary_mem (machine_mode mode, rtx decl_rtl, + aarch64_salt_type salt_type) +{ + rtx addr; + if (aarch64_stack_protector_guard == SSP_GLOBAL) + { + gcc_assert (MEM_P (decl_rtl)); + addr = XEXP (decl_rtl, 0); + poly_int64 offset; + rtx base = strip_offset_and_salt (addr, &offset); + if (!SYMBOL_REF_P (base)) + return decl_rtl; + + rtvec v = gen_rtvec (2, base, GEN_INT (salt_type)); + addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_SALT_ADDR); + addr = gen_rtx_CONST (Pmode, addr); + addr = plus_constant (Pmode, addr, offset); + } + else + { + /* Calculate the address from the system register. */ + rtx salt = GEN_INT (salt_type); + addr = gen_reg_rtx (mode); + if (mode == DImode) + emit_insn (gen_reg_stack_protect_address_di (addr, salt)); + else + { + emit_insn (gen_reg_stack_protect_address_si (addr, salt)); + addr = convert_memory_address (Pmode, addr); + } + addr = plus_constant (Pmode, addr, aarch64_stack_protector_guard_offset); + } + return gen_rtx_MEM (mode, force_reg (Pmode, addr)); +} + /* Emit an SVE predicated move from SRC to DEST. PRED is a predicate that is known to contain PTRUE. */ @@ -8658,8 +8718,6 @@ aarch64_move_imm (HOST_WIDE_INT val, machine_mode mode) static bool aarch64_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x) { - rtx base, offset; - if (GET_CODE (x) == HIGH) return true; @@ -8669,10 +8727,12 @@ aarch64_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x) if (GET_CODE (*iter) == CONST_POLY_INT) return true; - split_const (x, &base, &offset); + poly_int64 offset; + rtx base = strip_offset_and_salt (x, &offset); if (GET_CODE (base) == SYMBOL_REF || GET_CODE (base) == LABEL_REF) { - if (aarch64_classify_symbol (base, INTVAL (offset)) + /* We checked for POLY_INT_CST offsets above. 
*/ + if (aarch64_classify_symbol (base, offset.to_constant ()) != SYMBOL_FORCE_TO_MEM) return true; else @@ -9198,9 +9258,8 @@ aarch64_classify_address (struct aarch64_address_info *info, && GET_MODE_SIZE (mode).is_constant (&const_size) && const_size >= 4) { - rtx sym, addend; - - split_const (x, &sym, &addend); + poly_int64 offset; + rtx sym = strip_offset_and_salt (x, &offset); return ((GET_CODE (sym) == LABEL_REF || (GET_CODE (sym) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (sym) @@ -9215,10 +9274,12 @@ aarch64_classify_address (struct aarch64_address_info *info, if (allow_reg_index_p && aarch64_base_register_rtx_p (info->base, strict_p)) { - rtx sym, offs; - split_const (info->offset, &sym, &offs); + poly_int64 offset; + HOST_WIDE_INT const_offset; + rtx sym = strip_offset_and_salt (info->offset, &offset); if (GET_CODE (sym) == SYMBOL_REF - && (aarch64_classify_symbol (sym, INTVAL (offs)) + && offset.is_constant (&const_offset) + && (aarch64_classify_symbol (sym, const_offset) == SYMBOL_SMALL_ABSOLUTE)) { /* The symbol and offset must be aligned to the access size. */ @@ -9244,7 +9305,7 @@ aarch64_classify_address (struct aarch64_address_info *info, if (known_eq (ref_size, 0)) ref_size = GET_MODE_SIZE (DImode); - return (multiple_p (INTVAL (offs), ref_size) + return (multiple_p (const_offset, ref_size) && multiple_p (align / BITS_PER_UNIT, ref_size)); } } @@ -9276,9 +9337,8 @@ aarch64_address_valid_for_prefetch_p (rtx x, bool strict_p) bool aarch64_symbolic_address_p (rtx x) { - rtx offset; - - split_const (x, &x, &offset); + poly_int64 offset; + x = strip_offset_and_salt (x, &offset); return GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF; } @@ -10009,27 +10069,16 @@ aarch64_print_operand (FILE *f, rtx x, int code) switch (code) { case 'c': - switch (GET_CODE (x)) + if (CONST_INT_P (x)) + fprintf (f, HOST_WIDE_INT_PRINT_DEC, INTVAL (x)); + else { - case CONST_INT: - fprintf (f, HOST_WIDE_INT_PRINT_DEC, INTVAL (x)); - break; - - case SYMBOL_REF: - output_addr_const (f, x); - break; - - case CONST: - if (GET_CODE (XEXP (x, 0)) == PLUS - && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF) - { - output_addr_const (f, x); - break; - } - /* Fall through. */ - - default: - output_operand_lossage ("unsupported operand for code '%c'", code); + poly_int64 offset; + rtx base = strip_offset_and_salt (x, &offset); + if (SYMBOL_REF_P (base)) + output_addr_const (f, x); + else + output_operand_lossage ("unsupported operand for code '%c'", code); } break; @@ -10604,6 +10653,19 @@ aarch64_print_operand_address (FILE *f, machine_mode mode, rtx x) output_addr_const (f, x); } +/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */ + +static bool +aarch64_output_addr_const_extra (FILE *file, rtx x) +{ + if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_SALT_ADDR) + { + output_addr_const (file, XVECEXP (x, 0, 0)); + return true; + } + return false; +} + bool aarch64_label_mentioned_p (rtx x) { @@ -15887,6 +15949,7 @@ aarch64_tls_symbol_p (rtx x) if (! 
TARGET_HAVE_TLS) return false; + x = strip_salt (x); if (GET_CODE (x) != SYMBOL_REF) return false; @@ -15942,6 +16005,8 @@ aarch64_classify_tls_symbol (rtx x) enum aarch64_symbol_type aarch64_classify_symbol (rtx x, HOST_WIDE_INT offset) { + x = strip_salt (x); + if (GET_CODE (x) == LABEL_REF) { switch (aarch64_cmodel) @@ -16041,11 +16106,10 @@ aarch64_constant_address_p (rtx x) bool aarch64_legitimate_pic_operand_p (rtx x) { - if (GET_CODE (x) == SYMBOL_REF - || (GET_CODE (x) == CONST - && GET_CODE (XEXP (x, 0)) == PLUS - && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)) - return false; + poly_int64 offset; + x = strip_offset_and_salt (x, &offset); + if (GET_CODE (x) == SYMBOL_REF) + return false; return true; } @@ -16091,7 +16155,7 @@ aarch64_legitimate_constant_p (machine_mode mode, rtx x) /* If an offset is being added to something else, we need to allow the base to be moved into the destination register, meaning that there are no free temporaries for the offset. */ - x = strip_offset (x, &offset); + x = strip_offset_and_salt (x, &offset); if (!offset.is_constant () && aarch64_offset_temporaries (true, offset) > 0) return false; @@ -17988,6 +18052,7 @@ aarch64_mov_operand_p (rtx x, machine_mode mode) return aarch64_simd_valid_immediate (x, NULL); } + x = strip_salt (x); if (GET_CODE (x) == SYMBOL_REF && mode == DImode && CONSTANT_ADDRESS_P (x)) return true; @@ -23672,6 +23737,9 @@ aarch64_libgcc_floating_mode_supported_p #undef TARGET_PRINT_OPERAND_ADDRESS #define TARGET_PRINT_OPERAND_ADDRESS aarch64_print_operand_address +#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA +#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA aarch64_output_addr_const_extra + #undef TARGET_OPTAB_SUPPORTED_P #define TARGET_OPTAB_SUPPORTED_P aarch64_optab_supported_p diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md index a5d865a884a..da3ab66893a 100644 --- a/gcc/config/aarch64/aarch64.md +++ b/gcc/config/aarch64/aarch64.md @@ -281,6 +281,7 @@ (define_c_enum "unspec" [ UNSPEC_GEN_TAG_RND ; Generate a random 4-bit MTE tag. UNSPEC_TAG_SPACE ; Translate address to MTE tag address space. UNSPEC_LD1RO + UNSPEC_SALT_ADDR ]) (define_c_enum "unspecv" [ @@ -7101,43 +7102,37 @@ (define_expand "get_thread_pointerdi" DONE; }) -;; Named patterns for stack smashing protection. +;; Defined for -mstack-protector-guard=sysreg, which goes through this +;; pattern rather than stack_protect_combined_set. Our implementation +;; of the latter can handle both. (define_expand "stack_protect_set" [(match_operand 0 "memory_operand") - (match_operand 1 "memory_operand")] + (match_operand 1 "")] "" { - machine_mode mode = GET_MODE (operands[0]); - if (aarch64_stack_protector_guard != SSP_GLOBAL) - { - /* Generate access through the system register. 
*/ - rtx tmp_reg = gen_reg_rtx (mode); - if (mode == DImode) - { - emit_insn (gen_reg_stack_protect_address_di (tmp_reg)); - emit_insn (gen_adddi3 (tmp_reg, tmp_reg, - GEN_INT (aarch64_stack_protector_guard_offset))); - } - else - { - emit_insn (gen_reg_stack_protect_address_si (tmp_reg)); - emit_insn (gen_addsi3 (tmp_reg, tmp_reg, - GEN_INT (aarch64_stack_protector_guard_offset))); + emit_insn (gen_stack_protect_combined_set (operands[0], operands[1])); + DONE; +}) - } - operands[1] = gen_rtx_MEM (mode, tmp_reg); - } - +(define_expand "stack_protect_combined_set" + [(match_operand 0 "memory_operand") + (match_operand 1 "")] + "" +{ + machine_mode mode = GET_MODE (operands[0]); + operands[1] = aarch64_stack_protect_canary_mem (mode, operands[1], + AARCH64_SALT_SSP_SET); emit_insn ((mode == DImode ? gen_stack_protect_set_di : gen_stack_protect_set_si) (operands[0], operands[1])); DONE; }) +;; Operand 1 is either AARCH64_SALT_SSP_SET or AARCH64_SALT_SSP_TEST. (define_insn "reg_stack_protect_address_<mode>" [(set (match_operand:PTR 0 "register_operand" "=r") - (unspec:PTR [(const_int 0)] - UNSPEC_SSP_SYSREG))] + (unspec:PTR [(match_operand 1 "const_int_operand")] + UNSPEC_SSP_SYSREG))] "aarch64_stack_protector_guard != SSP_GLOBAL" { char buf[150]; @@ -7160,37 +7155,29 @@ (define_insn "stack_protect_set_<mode>" [(set_attr "length" "12") (set_attr "type" "multiple")]) +;; Defined for -mstack-protector-guard=sysreg, which goes through this +;; pattern rather than stack_protect_combined_test. Our implementation +;; of the latter can handle both. (define_expand "stack_protect_test" [(match_operand 0 "memory_operand") - (match_operand 1 "memory_operand") + (match_operand 1 "") (match_operand 2)] "" { - machine_mode mode = GET_MODE (operands[0]); - - if (aarch64_stack_protector_guard != SSP_GLOBAL) - { - /* Generate access through the system register. The - sequence we want here is the access - of the stack offset to come with - mrs scratch_reg, <system_register> - add scratch_reg, scratch_reg, :lo12:offset. */ - rtx tmp_reg = gen_reg_rtx (mode); - if (mode == DImode) - { - emit_insn (gen_reg_stack_protect_address_di (tmp_reg)); - emit_insn (gen_adddi3 (tmp_reg, tmp_reg, - GEN_INT (aarch64_stack_protector_guard_offset))); - } - else - { - emit_insn (gen_reg_stack_protect_address_si (tmp_reg)); - emit_insn (gen_addsi3 (tmp_reg, tmp_reg, - GEN_INT (aarch64_stack_protector_guard_offset))); + emit_insn (gen_stack_protect_combined_test (operands[0], operands[1], + operands[2])); + DONE; +}) - } - operands[1] = gen_rtx_MEM (mode, tmp_reg); - } +(define_expand "stack_protect_combined_test" + [(match_operand 0 "memory_operand") + (match_operand 1 "") + (match_operand 2)] + "" +{ + machine_mode mode = GET_MODE (operands[0]); + operands[1] = aarch64_stack_protect_canary_mem (mode, operands[1], + AARCH64_SALT_SSP_TEST); emit_insn ((mode == DImode ? 
gen_stack_protect_test_di : gen_stack_protect_test_si) (operands[0], operands[1])); diff --git a/gcc/testsuite/gcc.target/aarch64/stack-protector-5.c b/gcc/testsuite/gcc.target/aarch64/stack-protector-5.c new file mode 100644 index 00000000000..a9cd53b2eac --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/stack-protector-5.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-options "-fstack-protector-all -O2" } */ + +void __attribute__ ((noipa)) +f (void) +{ + volatile int x; + asm volatile ("" ::: + "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", + "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", + "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", + "x24", "x25", "x26", "x27", "x28", "x30"); +} + +/* The register clobbers above should not generate any single LDRs or STRs; + all registers should be saved and restored in pairs. The only STRs + should therefore be those associated with the stack protector + tests themselves. + + Make sure the address of the canary value is not spilled and reloaded, + since that would give the attacker an opportunity to change the + canary value. */ +/* { dg-final { scan-assembler-times {\tstr\t} 1 } } */ diff --git a/gcc/testsuite/gcc.target/aarch64/stack-protector-6.c b/gcc/testsuite/gcc.target/aarch64/stack-protector-6.c new file mode 100644 index 00000000000..e2ac0885eba --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/stack-protector-6.c @@ -0,0 +1,8 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target fpic } */ +/* { dg-options "-fstack-protector-all -O2 -fpic" } */ + +#include "stack-protector-5.c" + +/* See the comment in stack-protector-5.c. */ +/* { dg-final { scan-assembler-times {\tldr\t[^\n]*__stack_chk_guard} 2 } } */ diff --git a/gcc/testsuite/gcc.target/aarch64/stack-protector-7.c b/gcc/testsuite/gcc.target/aarch64/stack-protector-7.c new file mode 100644 index 00000000000..e644768fe5e --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/stack-protector-7.c @@ -0,0 +1,25 @@ +/* { dg-do compile } */ +/* { dg-options "-fstack-protector-all -mstack-protector-guard=sysreg -mstack-protector-guard-offset=16 -mstack-protector-guard-reg=tpidr_el0 -O2" } */ + +void __attribute__ ((noipa)) +f (void) +{ + volatile int x; + asm volatile ("" ::: + "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", + "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", + "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", + "x24", "x25", "x26", "x27", "x28", "x30"); +} + +/* The register clobbers above should not generate any single LDRs or STRs; + all registers should be saved and restored in pairs. The only LDRs and + STRs should therefore be those associated with the stack protector + tests themselves. + + Make sure the address of the canary value (tpidr_el0 + 16) is not + spilled and reloaded, since that would give the attacker an opportunity + to change the canary value. */ +/* { dg-final { scan-assembler-times {\tmrs\t} 2 } } */ +/* { dg-final { scan-assembler-times {\tstr\t} 1 } } */ +/* { dg-final { scan-assembler-times {\tldr\t} 3 } } */ -- 2.17.1
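For a quick check outside the testsuite, the pressure test in stack-protector-5.c and stack-protector-7.c boils down to the function below. Built with -fstack-protector-all -O2 -mstack-protector-guard=sysreg -mstack-protector-guard-reg=tpidr_el0 -mstack-protector-guard-offset=16, the canary set and test should each perform their own mrs (hence the scan-assembler-times {\tmrs\t} 2 above) instead of reloading a spilled address:

    // Clobber every general-purpose register so that anything the
    // compiler tries to keep live across the asm must be spilled.
    // Before the patch, one of the spilled values could be the
    // canary address, which a stack-based attacker could redirect.
    void __attribute__ ((noipa))
    f (void)
    {
      volatile int x;
      asm volatile ("" :::
                    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
                    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
                    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
                    "x24", "x25", "x26", "x27", "x28", "x30");
    }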
>From 59d2f2c7717298574c23b63d5a60acb850bd4e31 Mon Sep 17 00:00:00 2001 From: Richard Sandiford <richard.sandif...@arm.com> Date: Tue, 29 Sep 2020 11:22:07 +0100 Subject: [PATCH 5/5] arm: Fix canary address calculation for non-PIC For non-PIC, the stack protector patterns did: rtx mem = XEXP (force_const_mem (SImode, operands[1]), 0); emit_move_insn (operands[2], mem); Here, operands[1] is the address of the canary (&__stack_chk_guard) and operands[2] is the register that we want to move that address into. However, the code above instead sets operands[2] to the address of a constant pool entry that contains &__stack_chk_guard, rather than to &__stack_chk_guard itself. The sequence therefore does one less pointer indirection than it should. The net effect was to use &__stack_chk_guard for stack-smash detection, instead of using __stack_chk_guard itself. gcc/ * config/arm/arm.md (*stack_protect_combined_set_insn): For non-PIC, load the address of the canary rather than the address of the constant pool entry that points to it. (*stack_protect_combined_test_insn): Likewise. gcc/testsuite/ * gcc.target/arm/stack-protector-3.c: New test. * gcc.target/arm/stack-protector-4.c: Likewise. (cherry picked from commit e94797250b403d66cb3624a594e41faf0dd76617) --- gcc/config/arm/arm.md | 4 +- .../gcc.target/arm/stack-protector-3.c | 38 +++++++++++++++++++ .../gcc.target/arm/stack-protector-4.c | 6 +++ 3 files changed, 46 insertions(+), 2 deletions(-) create mode 100644 gcc/testsuite/gcc.target/arm/stack-protector-3.c create mode 100644 gcc/testsuite/gcc.target/arm/stack-protector-4.c diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md index dd13c77e889..6616eff7802 100644 --- a/gcc/config/arm/arm.md +++ b/gcc/config/arm/arm.md @@ -9212,7 +9212,7 @@ (define_insn_and_split "*stack_protect_combined_set_insn" operands[2] = operands[1]; else { - rtx mem = XEXP (force_const_mem (SImode, operands[1]), 0); + rtx mem = force_const_mem (SImode, operands[1]); emit_move_insn (operands[2], mem); } } @@ -9295,7 +9295,7 @@ (define_insn_and_split "*stack_protect_combined_test_insn" operands[3] = operands[1]; else { - rtx mem = XEXP (force_const_mem (SImode, operands[1]), 0); + rtx mem = force_const_mem (SImode, operands[1]); emit_move_insn (operands[3], mem); } } diff --git a/gcc/testsuite/gcc.target/arm/stack-protector-3.c b/gcc/testsuite/gcc.target/arm/stack-protector-3.c new file mode 100644 index 00000000000..b8f77fa2309 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/stack-protector-3.c @@ -0,0 +1,38 @@ +/* { dg-do run } */ +/* { dg-require-effective-target fstack_protector } */ +/* { dg-options "-fstack-protector-all -O2" } */ + +extern volatile long *stack_chk_guard_ptr; + +void __attribute__ ((noipa)) +f (void) +{ + volatile int x; + /* Munging the contents of __stack_chk_guard should trigger a + stack-smashing failure for this function. 
*/ + *stack_chk_guard_ptr += 1; +} + +asm ( +" .data\n" +" .align 3\n" +" .globl stack_chk_guard_ptr\n" +"stack_chk_guard_ptr:\n" +" .word __stack_chk_guard\n" +" .weak __stack_chk_guard\n" +"__stack_chk_guard:\n" +" .word 0xdead4321\n" +" .text\n" +" .type __stack_chk_fail, %function\n" +"__stack_chk_fail:\n" +" movs r0, #0\n" +" b exit\n" +" .size __stack_chk_fail, .-__stack_chk_fail" +); + +int +main (void) +{ + f (); + __builtin_abort (); +} diff --git a/gcc/testsuite/gcc.target/arm/stack-protector-4.c b/gcc/testsuite/gcc.target/arm/stack-protector-4.c new file mode 100644 index 00000000000..6334dd00908 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/stack-protector-4.c @@ -0,0 +1,6 @@ +/* { dg-do run } */ +/* { dg-require-effective-target fstack_protector } */ +/* { dg-require-effective-target fpic } */ +/* { dg-options "-fstack-protector-all -O2 -fpic" } */ + +#include "stack-protector-3.c" -- 2.17.1
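To restate the arm bug in source terms (an illustrative sketch of the effect, not the actual RTL expansion): the check is meant to compare the stack slot against the value of the canary, but the pre-patch non-PIC sequence lost one level of indirection, as if it had compared against the canary's address instead:

    extern unsigned long __stack_chk_guard;

    // Intended comparison value: the canary itself.
    unsigned long intended (void) { return __stack_chk_guard; }

    // Effective pre-patch comparison value: the canary's address,
    // a link-time constant rather than a runtime secret.
    unsigned long buggy (void) { return (unsigned long) &__stack_chk_guard; }

Because the address is the same at set and test time, the bug is invisible in normal runs; stack-protector-3.c exposes it by modifying __stack_chk_guard inside f and checking that the stack-smashing handler runs rather than f returning.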