2011-10-27  Richard Henderson  <r...@redhat.com>

	* c-cppbuiltin.c (c_cpp_builtins): Test both atomic and sync patterns.

2011-10-25  Andrew MacLeod  <amacl...@redhat.com>

	* c-common.c (get_atomic_generic_size): New.  Find size of generic
	atomic function parameters and do typechecking.
	(add_atomic_size_parameter): New.  Insert size into parameter list.
	(resolve_overloaded_atomic_exchange): Restructure __atomic_exchange
	to either __atomic_exchange_n or external library call.
	(resolve_overloaded_atomic_compare_exchange): Restructure
	__atomic_compare_exchange to either _n variant or external library
	call.
	(resolve_overloaded_atomic_load): Restructure __atomic_load to
	either __atomic_load_n or an external library call.
	(resolve_overloaded_atomic_store): Restructure __atomic_store to
	either __atomic_store_n or an external library call.
	(resolve_overloaded_builtin): Handle 4 generic __atomic calls.

2011-10-20  Andrew MacLeod  <amacl...@redhat.com>

	* c-common.c (resolve_overloaded_builtin): Don't try to process a
	return value with an error mark.

2011-10-17  Andrew MacLeod  <amacl...@redhat.com>

	* c-common.c: Rename __sync_mem to __atomic.

2011-09-16  Andrew MacLeod  <amacl...@redhat.com>

	* c-common.c (sync_resolve_params, sync_resolve_return): Only tweak
	parameters that are the same type size.
	(resolve_overloaded_builtin): Use new param for __sync_mem builtins.
	Support new NAND builtins.

2011-08-13  Andrew MacLeod  <amacl...@redhat.com>

	* c-common.c (resolve_overloaded_builtin): Handle new __SYNC_MEM
	builtins.

2011-07-27  Andrew MacLeod  <amacl...@redhat.com>

	* c-common.c (resolve_overloaded_builtin): Handle
	BUILT_IN_SYNC_MEM_* types.

2011-06-23  Andrew MacLeod  <amacl...@redhat.com>

	* c-common.c (resolve_overloaded_builtin): Add
	BUILT_IN_SYNC_MEM_LOAD_N.

2011-06-21  Andrew MacLeod  <amacl...@redhat.com>
	    Aldy Hernandez  <al...@redhat.com>

	* c-common.c (BUILT_IN_SYNC_MEM_EXCHANGE_N): Add case.

Index: c-family/c-common.c
===================================================================
*** c-family/c-common.c	(.../trunk/gcc)	(revision 180790)
--- c-family/c-common.c	(.../branches/cxx-mem-model/gcc)	(revision 180839)
*************** sync_resolve_size (tree function, VEC(tr
*** 9020,9026 ****
     was encountered; true on success.  */
  
  static bool
! sync_resolve_params (tree orig_function, tree function, VEC(tree, gc) *params)
  {
    function_args_iterator iter;
    tree ptype;
--- 9020,9027 ----
     was encountered; true on success.  */
  
  static bool
! sync_resolve_params (tree orig_function, tree function, VEC(tree, gc) *params,
! 		     bool orig_format)
  {
    function_args_iterator iter;
    tree ptype;
*************** sync_resolve_params (tree orig_function,
*** 9052,9068 ****
  	  return false;
  	}
  
!       /* ??? Ideally for the first conversion we'd use convert_for_assignment
! 	 so that we get warnings for anything that doesn't match the pointer
! 	 type.  This isn't portable across the C and C++ front ends atm.  */
!       val = VEC_index (tree, params, parmnum);
!       val = convert (ptype, val);
!       val = convert (arg_type, val);
!       VEC_replace (tree, params, parmnum, val);
  
        function_args_iter_next (&iter);
      }
  
    /* The definition of these primitives is variadic, with the remaining
       being "an optional list of variables protected by the memory barrier".
       No clue what that's supposed to mean, precisely, but we consider all
--- 9053,9082 ----
  	  return false;
  	}
  
!       /* Only convert parameters if the size is appropriate with new format
! 	 sync routines.  */
!       if (orig_format ||
! 	  tree_int_cst_equal (TYPE_SIZE (ptype), TYPE_SIZE (arg_type)))
! 	{
! 	  /* Ideally for the first conversion we'd use convert_for_assignment
! 	     so that we get warnings for anything that doesn't match the pointer
! 	     type.  This isn't portable across the C and C++ front ends atm.  */
! 	  val = VEC_index (tree, params, parmnum);
! 	  val = convert (ptype, val);
! 	  val = convert (arg_type, val);
! 	  VEC_replace (tree, params, parmnum, val);
! 	}
  
        function_args_iter_next (&iter);
      }
  
+   /* __atomic routines are not variadic.  */
+   if (!orig_format && VEC_length (tree, params) != parmnum + 1)
+     {
+       error ("too many arguments to function %qE", orig_function);
+       return false;
+     }
+ 
    /* The definition of these primitives is variadic, with the remaining
       being "an optional list of variables protected by the memory barrier".
       No clue what that's supposed to mean, precisely, but we consider all
*************** sync_resolve_params (tree orig_function,
*** 9077,9089 ****
     PARAMS.  */
  
  static tree
! sync_resolve_return (tree first_param, tree result)
  {
    tree ptype = TREE_TYPE (TREE_TYPE (first_param));
    ptype = TYPE_MAIN_VARIANT (ptype);
!   return convert (ptype, result);
  }
  
  /* Some builtin functions are placeholders for other expressions.  This
     function should be called immediately after parsing the call expression
     before surrounding code has committed to the type of the expression.
--- 9091,9473 ----
     PARAMS.  */
  
  static tree
! sync_resolve_return (tree first_param, tree result, bool orig_format)
  {
    tree ptype = TREE_TYPE (TREE_TYPE (first_param));
+   tree rtype = TREE_TYPE (result);
    ptype = TYPE_MAIN_VARIANT (ptype);
! 
!   /* New format doesn't require casting unless the types are the same size.  */
!   if (orig_format || tree_int_cst_equal (TYPE_SIZE (ptype), TYPE_SIZE (rtype)))
!     return convert (ptype, result);
!   else
!     return result;
  }
  
+ /* This function verifies the PARAMS to generic atomic FUNCTION.
+    It returns the size if all the parameters are the same size, and 0 is
+    returned if the parameters are invalid.  */
+ 
+ static int
+ get_atomic_generic_size (tree function, VEC(tree,gc) *params)
+ {
+   unsigned int n_param;
+   unsigned int n_model;
+   unsigned int x;
+   int size_0;
+   tree type_0;
+ 
+   /* Determine the parameter makeup.  */
+   switch (DECL_FUNCTION_CODE (function))
+     {
+     case BUILT_IN_ATOMIC_EXCHANGE:
+       n_param = 4;
+       n_model = 1;
+       break;
+     case BUILT_IN_ATOMIC_LOAD:
+     case BUILT_IN_ATOMIC_STORE:
+       n_param = 3;
+       n_model = 1;
+       break;
+     case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
+       n_param = 6;
+       n_model = 2;
+       break;
+     default:
+       return 0;
+     }
+ 
+   if (VEC_length (tree, params) != n_param)
+     {
+       error ("incorrect number of arguments to function %qE", function);
+       return 0;
+     }
+ 
+   /* Get type of first parameter, and determine its size.  */
+   type_0 = TREE_TYPE (VEC_index (tree, params, 0));
+   if (TREE_CODE (type_0) != POINTER_TYPE)
+     {
+       error ("argument 0 of %qE must be a pointer type", function);
+       return 0;
+     }
+   size_0 = tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (type_0)), 1);
+ 
+   /* Check that each other parameter is a pointer of the same size.  */
+   for (x = 0; x < n_param - n_model; x++)
+     {
+       int size;
+       tree type = TREE_TYPE (VEC_index (tree, params, x));
+       /* __atomic_compare_exchange has a bool in the 4th position, skip it.  */
+       if (n_param == 6 && x == 3)
+ 	continue;
+       if (!POINTER_TYPE_P (type))
+ 	{
+ 	  error ("argument %d of %qE must be a pointer type", x, function);
+ 	  return 0;
+ 	}
+       size = tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (type)), 1);
+       if (size != size_0)
+ 	{
+ 	  error ("size mismatch in argument %d of %qE", x, function);
+ 	  return 0;
+ 	}
+     }
+ 
+   /* Check memory model parameters for validity.  */
+   for (x = n_param - n_model; x < n_param; x++)
+     {
+       tree p = VEC_index (tree, params, x);
+       if (TREE_CODE (p) == INTEGER_CST)
+ 	{
+ 	  int i = tree_low_cst (p, 1);
+ 	  if (i < 0 || i >= MEMMODEL_LAST)
+ 	    {
+ 	      error ("invalid memory model argument %d of %qE", x, function);
+ 	      return 0;
+ 	    }
+ 	}
+       else
+ 	if (!INTEGRAL_TYPE_P (TREE_TYPE (p)))
+ 	  {
+ 	    error ("non-integer memory model argument %d of %qE", x, function);
+ 	    return 0;
+ 	  }
+     }
+ 
+   return size_0;
+ }
+ 
+ 
+ /* This will take an __atomic_ generic FUNCTION call, and add a size
+    parameter N at the beginning of the parameter list PARAMS representing
+    the size of the objects.  This is to match the library ABI requirement.
+    LOC is the location of the function call.
+    The new function is returned if it needed rebuilding, otherwise NULL_TREE
+    is returned to allow the external call to be constructed.  */
+ 
+ static tree
+ add_atomic_size_parameter (unsigned n, location_t loc, tree function,
+ 			   VEC(tree,gc) *params)
+ {
+   tree size_node;
+ 
+   /* Insert a SIZE_T parameter as the first param.  If there isn't
+      enough space, allocate a new vector and recursively re-build with
+      that.  */
+   if (!VEC_space (tree, params, 1))
+     {
+       unsigned int z, len;
+       VEC(tree,gc) *vec;
+       tree f;
+ 
+       len = VEC_length (tree, params);
+       vec = VEC_alloc (tree, gc, len + 1);
+       for (z = 0; z < len; z++)
+ 	VEC_quick_push (tree, vec, VEC_index (tree, params, z));
+       f = build_function_call_vec (loc, function, vec, NULL);
+       VEC_free (tree, gc, vec);
+       return f;
+     }
+ 
+   /* Add the size parameter and leave as a function call for processing.  */
+   size_node = build_int_cst (size_type_node, n);
+   VEC_quick_insert (tree, params, 0, size_node);
+   return NULL_TREE;
+ }
+ 
+ 
+ /* This will process an __atomic_exchange function call, determine whether
+    it needs to be mapped to the _N variation, or turned into a library call.
+    LOC is the location of the builtin call.
+    FUNCTION is the DECL that has been invoked;
+    PARAMS is the argument list for the call.
+    TRUE is returned if it is translated into the proper format for a call
+    to the external library, and NEW_RETURN is set to the tree for that
+    function.
+    FALSE is returned if processing for the _N variation is required, and
+    NEW_RETURN is set to the return value the result is copied into.  */
+ 
+ static bool
+ resolve_overloaded_atomic_exchange (location_t loc, tree function,
+ 				    VEC(tree,gc) *params, tree *new_return)
+ {
+   tree p0, p1, p2, p3;
+   tree I_type, I_type_ptr;
+   int n = get_atomic_generic_size (function, params);
+ 
+   /* If not a lock-free size, change to the library generic format.  */
+   if (n != 1 && n != 2 && n != 4 && n != 8 && n != 16)
+     {
+       *new_return = add_atomic_size_parameter (n, loc, function, params);
+       return true;
+     }
+ 
+   /* Otherwise there is a lockfree match, transform the call from:
+        void fn(T* mem, T* desired, T* return, model)
+      into
+        *return = (T) (fn (In* mem, (In) *desired, model))  */
+ 
+   p0 = VEC_index (tree, params, 0);
+   p1 = VEC_index (tree, params, 1);
+   p2 = VEC_index (tree, params, 2);
+   p3 = VEC_index (tree, params, 3);
+ 
+   /* Create pointer to appropriate size.  */
+   I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
+   I_type_ptr = build_pointer_type (I_type);
+ 
+   /* Convert object pointer to required type.  */
+   p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
+   VEC_replace (tree, params, 0, p0);
+ 
+   /* Convert new value to required type, and dereference it.  */
+   p1 = build_indirect_ref (loc, p1, RO_UNARY_STAR);
+   p1 = build1 (VIEW_CONVERT_EXPR, I_type, p1);
+   VEC_replace (tree, params, 1, p1);
+ 
+   /* Move memory model to the 3rd position, and end param list.  */
+   VEC_replace (tree, params, 2, p3);
+   VEC_truncate (tree, params, 3);
+ 
+   /* Convert return pointer and dereference it for later assignment.  */
+   *new_return = build_indirect_ref (loc, p2, RO_UNARY_STAR);
+ 
+   return false;
+ }
+ 
+ 
+ /* This will process an __atomic_compare_exchange function call, determine
+    whether it needs to be mapped to the _N variation, or turned into a
+    library call.
+    LOC is the location of the builtin call.
+    FUNCTION is the DECL that has been invoked;
+    PARAMS is the argument list for the call.
+    TRUE is returned if it is translated into the proper format for a call
+    to the external library, and NEW_RETURN is set to the tree for that
+    function.
+    FALSE is returned if processing for the _N variation is required.  */
+ 
+ static bool
+ resolve_overloaded_atomic_compare_exchange (location_t loc, tree function,
+ 					    VEC(tree,gc) *params,
+ 					    tree *new_return)
+ {
+   tree p0, p1, p2;
+   tree I_type, I_type_ptr;
+   int n = get_atomic_generic_size (function, params);
+ 
+   /* If not a lock-free size, change to the library generic format.  */
+   if (n != 1 && n != 2 && n != 4 && n != 8 && n != 16)
+     {
+       /* The library generic format does not have the weak parameter, so
+ 	 remove it from the param list.  Since a parameter has been removed,
+ 	 we can be sure that there is room for the SIZE_T parameter, meaning
+ 	 there will not be a recursive rebuilding of the parameter list, so
+ 	 there is no danger this will be done twice.  */
+       if (n > 0)
+ 	{
+ 	  VEC_replace (tree, params, 3, VEC_index (tree, params, 4));
+ 	  VEC_replace (tree, params, 4, VEC_index (tree, params, 5));
+ 	  VEC_truncate (tree, params, 5);
+ 	}
+       *new_return = add_atomic_size_parameter (n, loc, function, params);
+       return true;
+     }
+ 
+   /* Otherwise, there is a match, so the call needs to be transformed from:
+        bool fn(T* mem, T* expected, T* desired, weak, success, failure)
+      into
+        bool fn ((In *)mem, (In *)expected, (In) *desired, weak, succ, fail)  */
+ 
+   p0 = VEC_index (tree, params, 0);
+   p1 = VEC_index (tree, params, 1);
+   p2 = VEC_index (tree, params, 2);
+ 
+   /* Create pointer to appropriate size.  */
+   I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
+   I_type_ptr = build_pointer_type (I_type);
+ 
+   /* Convert object pointer to required type.  */
+   p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
+   VEC_replace (tree, params, 0, p0);
+ 
+   /* Convert expected pointer to required type.  */
+   p1 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p1);
+   VEC_replace (tree, params, 1, p1);
+ 
+   /* Convert desired value to required type, and dereference it.  */
+   p2 = build_indirect_ref (loc, p2, RO_UNARY_STAR);
+   p2 = build1 (VIEW_CONVERT_EXPR, I_type, p2);
+   VEC_replace (tree, params, 2, p2);
+ 
+   /* The rest of the parameters are fine.  NULL means no special return
+      value processing.  */
+   *new_return = NULL;
+   return false;
+ }
+ 
+ 
+ /* This will process an __atomic_load function call, determine whether it
+    needs to be mapped to the _N variation, or turned into a library call.
+    LOC is the location of the builtin call.
+    FUNCTION is the DECL that has been invoked;
+    PARAMS is the argument list for the call.
+    TRUE is returned if it is translated into the proper format for a call
+    to the external library, and NEW_RETURN is set to the tree for that
+    function.
+    FALSE is returned if processing for the _N variation is required, and
+    NEW_RETURN is set to the return value the result is copied into.  */
+ 
+ static bool
+ resolve_overloaded_atomic_load (location_t loc, tree function,
+ 				VEC(tree,gc) *params, tree *new_return)
+ {
+   tree p0, p1, p2;
+   tree I_type, I_type_ptr;
+   int n = get_atomic_generic_size (function, params);
+ 
+   /* If not a lock-free size, change to the library generic format.  */
+   if (n != 1 && n != 2 && n != 4 && n != 8 && n != 16)
+     {
+       *new_return = add_atomic_size_parameter (n, loc, function, params);
+       return true;
+     }
+ 
+   /* Otherwise, there is a match, so the call needs to be transformed from:
+        void fn(T* mem, T* return, model)
+      into
+        *return = (T) (fn ((In *) mem, model))  */
+ 
+   p0 = VEC_index (tree, params, 0);
+   p1 = VEC_index (tree, params, 1);
+   p2 = VEC_index (tree, params, 2);
+ 
+   /* Create pointer to appropriate size.  */
+   I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
+   I_type_ptr = build_pointer_type (I_type);
+ 
+   /* Convert object pointer to required type.  */
+   p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
+   VEC_replace (tree, params, 0, p0);
+ 
+   /* Move memory model to the 2nd position, and end param list.  */
+   VEC_replace (tree, params, 1, p2);
+   VEC_truncate (tree, params, 2);
+ 
+   /* Convert return pointer and dereference it for later assignment.  */
+   *new_return = build_indirect_ref (loc, p1, RO_UNARY_STAR);
+ 
+   return false;
+ }
+ 
+ 
+ /* This will process an __atomic_store function call, determine whether it
+    needs to be mapped to the _N variation, or turned into a library call.
+    LOC is the location of the builtin call.
+    FUNCTION is the DECL that has been invoked;
+    PARAMS is the argument list for the call.
+    TRUE is returned if it is translated into the proper format for a call
+    to the external library, and NEW_RETURN is set to the tree for that
+    function.
+    FALSE is returned if processing for the _N variation is required, and
+    NEW_RETURN is set to the return value the result is copied into.  */
+ 
+ static bool
+ resolve_overloaded_atomic_store (location_t loc, tree function,
+ 				 VEC(tree,gc) *params, tree *new_return)
+ {
+   tree p0, p1;
+   tree I_type, I_type_ptr;
+   int n = get_atomic_generic_size (function, params);
+ 
+   /* If not a lock-free size, change to the library generic format.  */
+   if (n != 1 && n != 2 && n != 4 && n != 8 && n != 16)
+     {
+       *new_return = add_atomic_size_parameter (n, loc, function, params);
+       return true;
+     }
+ 
+   /* Otherwise, there is a match, so the call needs to be transformed from:
+        void fn(T* mem, T* value, model)
+      into
+        fn ((In *) mem, (In) *value, model)  */
+ 
+   p0 = VEC_index (tree, params, 0);
+   p1 = VEC_index (tree, params, 1);
+ 
+   /* Create pointer to appropriate size.  */
+   I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
+   I_type_ptr = build_pointer_type (I_type);
+ 
+   /* Convert object pointer to required type.  */
+   p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
+   VEC_replace (tree, params, 0, p0);
+ 
+   /* Convert new value to required type, and dereference it.  */
+   p1 = build_indirect_ref (loc, p1, RO_UNARY_STAR);
+   p1 = build1 (VIEW_CONVERT_EXPR, I_type, p1);
+   VEC_replace (tree, params, 1, p1);
+ 
+   /* The memory model is in the right spot already.  Return is void.  */
+   *new_return = NULL_TREE;
+ 
+   return false;
+ }
+ 
+ 
  /* Some builtin functions are placeholders for other expressions.  This
     function should be called immediately after parsing the call expression
     before surrounding code has committed to the type of the expression.
*************** tree
*** 9099,9104 ****
--- 9483,9491 ----
  resolve_overloaded_builtin (location_t loc, tree function, VEC(tree,gc) *params)
  {
    enum built_in_function orig_code = DECL_FUNCTION_CODE (function);
+   bool orig_format = true;
+   tree new_return = NULL_TREE;
+ 
    switch (DECL_BUILT_IN_CLASS (function))
      {
      case BUILT_IN_NORMAL:
*************** resolve_overloaded_builtin (location_t l
*** 9115,9120 ****
--- 9502,9579 ----
    /* Handle BUILT_IN_NORMAL here.  */
    switch (orig_code)
      {
+     case BUILT_IN_ATOMIC_EXCHANGE:
+     case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
+     case BUILT_IN_ATOMIC_LOAD:
+     case BUILT_IN_ATOMIC_STORE:
+       {
+ 	/* Handle these 4 together so that they can fall through to the next
+ 	   case if the call is transformed to an _N variant.  */
+ 	switch (orig_code)
+ 	  {
+ 	  case BUILT_IN_ATOMIC_EXCHANGE:
+ 	    {
+ 	      if (resolve_overloaded_atomic_exchange (loc, function, params,
+ 						      &new_return))
+ 		return new_return;
+ 	      /* Change to the _N variant.  */
+ 	      orig_code = BUILT_IN_ATOMIC_EXCHANGE_N;
+ 	      break;
+ 	    }
+ 
+ 	  case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
+ 	    {
+ 	      if (resolve_overloaded_atomic_compare_exchange (loc, function,
+ 							      params,
+ 							      &new_return))
+ 		return new_return;
+ 	      /* Change to the _N variant.  */
+ 	      orig_code = BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N;
+ 	      break;
+ 	    }
+ 	  case BUILT_IN_ATOMIC_LOAD:
+ 	    {
+ 	      if (resolve_overloaded_atomic_load (loc, function, params,
+ 						  &new_return))
+ 		return new_return;
+ 	      /* Change to the _N variant.  */
+ 	      orig_code = BUILT_IN_ATOMIC_LOAD_N;
+ 	      break;
+ 	    }
+ 	  case BUILT_IN_ATOMIC_STORE:
+ 	    {
+ 	      if (resolve_overloaded_atomic_store (loc, function, params,
+ 						   &new_return))
+ 		return new_return;
+ 	      /* Change to the _N variant.  */
+ 	      orig_code = BUILT_IN_ATOMIC_STORE_N;
+ 	      break;
+ 	    }
+ 	  default:
+ 	    gcc_unreachable ();
+ 	  }
+ 	/* Fallthrough to the normal processing.  */
+       }
+     case BUILT_IN_ATOMIC_EXCHANGE_N:
+     case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N:
+     case BUILT_IN_ATOMIC_LOAD_N:
+     case BUILT_IN_ATOMIC_STORE_N:
+     case BUILT_IN_ATOMIC_ADD_FETCH_N:
+     case BUILT_IN_ATOMIC_SUB_FETCH_N:
+     case BUILT_IN_ATOMIC_AND_FETCH_N:
+     case BUILT_IN_ATOMIC_NAND_FETCH_N:
+     case BUILT_IN_ATOMIC_XOR_FETCH_N:
+     case BUILT_IN_ATOMIC_OR_FETCH_N:
+     case BUILT_IN_ATOMIC_FETCH_ADD_N:
+     case BUILT_IN_ATOMIC_FETCH_SUB_N:
+     case BUILT_IN_ATOMIC_FETCH_AND_N:
+     case BUILT_IN_ATOMIC_FETCH_NAND_N:
+     case BUILT_IN_ATOMIC_FETCH_XOR_N:
+     case BUILT_IN_ATOMIC_FETCH_OR_N:
+       {
+ 	orig_format = false;
+ 	/* Fallthru for parameter processing.  */
+       }
      case BUILT_IN_SYNC_FETCH_AND_ADD_N:
      case BUILT_IN_SYNC_FETCH_AND_SUB_N:
      case BUILT_IN_SYNC_FETCH_AND_OR_N:
*************** resolve_overloaded_builtin (location_t l
*** 9141,9155 ****
  	fncode = (enum built_in_function)((int)orig_code + exact_log2 (n) + 1);
  	new_function = builtin_decl_explicit (fncode);
! 	if (!sync_resolve_params (function, new_function, params))
  	  return error_mark_node;
  
  	first_param = VEC_index (tree, params, 0);
  	result = build_function_call_vec (loc, new_function, params, NULL);
  	if (orig_code != BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N
! 	    && orig_code != BUILT_IN_SYNC_LOCK_RELEASE_N)
! 	  result = sync_resolve_return (first_param, result);
! 
  	return result;
        }
--- 9600,9629 ----
  	fncode = (enum built_in_function)((int)orig_code + exact_log2 (n) + 1);
  	new_function = builtin_decl_explicit (fncode);
! 	if (!sync_resolve_params (function, new_function, params, orig_format))
  	  return error_mark_node;
  
  	first_param = VEC_index (tree, params, 0);
  	result = build_function_call_vec (loc, new_function, params, NULL);
+ 	if (result == error_mark_node)
+ 	  return result;
  	if (orig_code != BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N
! 	    && orig_code != BUILT_IN_SYNC_LOCK_RELEASE_N
! 	    && orig_code != BUILT_IN_ATOMIC_STORE_N)
! 	  result = sync_resolve_return (first_param, result, orig_format);
! 
! 	/* If new_return is set, assign function to that expr and cast the
! 	   result to void since the generic interface returned void.  */
! 	if (new_return)
! 	  {
! 	    /* Cast function result from I{1,2,4,8,16} to the required type.  */
! 	    result = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (new_return), result);
! 	    result = build2 (MODIFY_EXPR, TREE_TYPE (new_return), new_return,
! 			     result);
! 	    TREE_SIDE_EFFECTS (result) = 1;
! 	    protected_set_expr_location (result, loc);
! 	    result = convert (void_type_node, result);
! 	  }
  
  	return result;
        }
Index: c-family/c-cppbuiltin.c
===================================================================
*** c-family/c-cppbuiltin.c	(.../trunk/gcc)	(revision 180790)
--- c-family/c-cppbuiltin.c	(.../branches/cxx-mem-model/gcc)	(revision 180839)
*************** c_cpp_builtins (cpp_reader *pfile)
*** 758,787 ****
  
    /* Tell source code if the compiler makes sync_compare_and_swap
       builtins available.  */
! #ifdef HAVE_sync_compare_and_swapqi
!   if (HAVE_sync_compare_and_swapqi)
!     cpp_define (pfile, "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  #endif
  
! #ifdef HAVE_sync_compare_and_swaphi
!   if (HAVE_sync_compare_and_swaphi)
!     cpp_define (pfile, "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  #endif
  
! #ifdef HAVE_sync_compare_and_swapsi
!   if (HAVE_sync_compare_and_swapsi)
!     cpp_define (pfile, "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  #endif
  
! #ifdef HAVE_sync_compare_and_swapdi
!   if (HAVE_sync_compare_and_swapdi)
!     cpp_define (pfile, "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
  #endif
  
! #ifdef HAVE_sync_compare_and_swapti
!   if (HAVE_sync_compare_and_swapti)
!     cpp_define (pfile, "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
  #endif
  
  #ifdef DWARF2_UNWIND_INFO
    if (dwarf2out_do_cfi_asm ())
--- 758,807 ----
  
    /* Tell source code if the compiler makes sync_compare_and_swap
       builtins available.  */
! #ifndef HAVE_sync_compare_and_swapqi
! #define HAVE_sync_compare_and_swapqi 0
! #endif
! #ifndef HAVE_atomic_compare_and_swapqi
! #define HAVE_atomic_compare_and_swapqi 0
  #endif
+   if (HAVE_sync_compare_and_swapqi || HAVE_atomic_compare_and_swapqi)
+     cpp_define (pfile, "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  
! #ifndef HAVE_sync_compare_and_swaphi
! #define HAVE_sync_compare_and_swaphi 0
  #endif
+ #ifndef HAVE_atomic_compare_and_swaphi
+ #define HAVE_atomic_compare_and_swaphi 0
+ #endif
+   if (HAVE_sync_compare_and_swaphi || HAVE_atomic_compare_and_swaphi)
+     cpp_define (pfile, "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  
! #ifndef HAVE_sync_compare_and_swapsi
! #define HAVE_sync_compare_and_swapsi 0
! #endif
! #ifndef HAVE_atomic_compare_and_swapsi
! #define HAVE_atomic_compare_and_swapsi 0
  #endif
+   if (HAVE_sync_compare_and_swapsi || HAVE_atomic_compare_and_swapsi)
+     cpp_define (pfile, "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  
! #ifndef HAVE_sync_compare_and_swapdi
! #define HAVE_sync_compare_and_swapdi 0
  #endif
+ #ifndef HAVE_atomic_compare_and_swapdi
+ #define HAVE_atomic_compare_and_swapdi 0
+ #endif
+   if (HAVE_sync_compare_and_swapdi || HAVE_atomic_compare_and_swapdi)
+     cpp_define (pfile, "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
  
! #ifndef HAVE_sync_compare_and_swapti
! #define HAVE_sync_compare_and_swapti 0
! #endif
! #ifndef HAVE_atomic_compare_and_swapti
! #define HAVE_atomic_compare_and_swapti 0
  #endif
+   if (HAVE_sync_compare_and_swapti || HAVE_atomic_compare_and_swapti)
+     cpp_define (pfile, "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
  
  #ifdef DWARF2_UNWIND_INFO
    if (dwarf2out_do_cfi_asm ())
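
For illustration, here is a sketch of what the generic-to-_N resolution
above does at the source level, assuming a target where 4-byte atomics are
lock-free.  The struct types, the I4 shorthand for the 4-byte integral type
built by builtin_type_for_size, and the lowered forms spelled out in the
comments are my reading of the patch, not something it adds.

/* Sketch: user-visible effect of resolve_overloaded_atomic_exchange.  */

#include <stdint.h>

struct pod4  { uint32_t a; };                 /* 4 bytes: a lock-free size.  */
struct pod24 { uint32_t a; uint32_t b[5]; };  /* 24 bytes: no _N variant.  */

struct pod4  g4;
struct pod24 g24;

void
example (void)
{
  struct pod4 d4, r4;
  struct pod24 d24, r24;

  /* Size 4 matches a lock-free size, so this resolves to the _4 variant,
     roughly:  r4 = VIEW_CONVERT (__atomic_exchange_4 ((I4 *) &g4,
                                                       (I4) d4,
                                                       __ATOMIC_SEQ_CST));  */
  __atomic_exchange (&g4, &d4, &r4, __ATOMIC_SEQ_CST);

  /* Size 24 does not, so add_atomic_size_parameter prepends the size and
     the call goes to the external library routine:
     __atomic_exchange (24, &g24, &d24, &r24, __ATOMIC_SEQ_CST);  */
  __atomic_exchange (&g24, &d24, &r24, __ATOMIC_SEQ_CST);
}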
These changes are primarily recognition of all the __atomic builtins, plus
expansion of the generic versions of the atomics.  If a generic function
call uses an object whose size matches a lock-free integral size, the call
is translated into the corresponding _N builtin; otherwise it is turned
into a call to the external library routine.  This provides lock-free
support for arbitrary user PODs.
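
As a concrete example, a compare-and-swap on a 4-byte user POD resolves to
the lock-free _4 builtin, the same sizes the __GCC_HAVE_SYNC_COMPARE_AND_SWAP_N
macros from the c-cppbuiltin.c hunk advertise.  The type and memory-model
choices below are illustrative assumptions, not part of the patch.

#include <stdbool.h>
#include <stdint.h>

struct pos { uint16_t x, y; };   /* 4-byte POD: lock-free on most targets.  */

static struct pos current;

bool
move_to (struct pos expected, struct pos desired)
{
  /* The generic form takes pointers, so it works for any POD; since the
     size is 4, the front end maps this to __atomic_compare_exchange_4
     rather than the external library call.  */
  return __atomic_compare_exchange (&current, &expected, &desired,
                                    false /* weak */,
                                    __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
}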