On Thu, Aug 06, 2020 at 01:00:04PM +0100, Stuart Henderson wrote:
> On 2020/08/06 03:22, Jonathan Gray wrote:
> > On Tue, Aug 04, 2020 at 07:40:45PM -0400, George Koehler wrote:
> > > On Tue, 4 Aug 2020 23:22:14 +0100
> > > Stuart Henderson <[email protected]> wrote:
> > > 
> > > > ld.lld: error: undefined symbol: __atomic_is_lock_free
> > > 
> > > kettenis wrote an __atomic_is_lock_free for macppc when it switched to
> > > base-clang: /usr/src/lib/libcompiler_rt/ppc/atomic_lock_free.c
> > > 
> > > Do we now need to write __atomic_is_lock_free for i386?
> > > 
> > > This commit might be the cause:
> > >   [X86] Correct the value of MaxAtomicInlineWidth for pre-586 cpus
> > >   https://github.com/llvm/llvm-project/commit/7339e61
> > >   https://reviews.llvm.org/D59566
> > > 
> > > clang might call __atomic_is_lock_free to check whether the cpu can
> > > cmpxchg8b.
> > > 
> > > 
> > 
> > or we make the baseline i586?
> 
> I'm OK with doing this.
> 
> databases/mariadb still fails though:
> 
> FAILED: unittest/strings/json-t 
> : && /pobj/mariadb-10.5.4/bin/c++  -O2 -pipe  -I/usr/local/include 
> -Wl,-z,relro,-z,now -fstack-protector --param=ssp-buffer-size=4 -DNDEBUG 
> -D_FORTIFY_SOURCE=2 -DDBUG_OFF  -L/usr/local/lib 
> unittest/strings/CMakeFiles/json-t.dir/json-t.c.o
>   -o unittest/strings/json-t -L/usr/lib   -L/usr/local/lib -lpthread  
> unittest/mytap/libmytap.a  strings/libstrings.a  mysys/libmysys.a  
> dbug/libdbug.a  strings/libstrings.a  mysys/libmysys.a  dbug/libdbug.a  -lz  
> -lm  -lpthread  -Wl,-rpa
> th-link,/usr/X11R6/lib && :
> ld: error: undefined symbol: my_atomic_add32

The test used for C11 atomics at configure time fails.

Updating compiler_rt's atomic.c to the release/10.x branch version works here.

Index: atomic.c
===================================================================
RCS file: /cvs/src/lib/libcompiler_rt/atomic.c,v
retrieving revision 1.2
diff -u -p -r1.2 atomic.c
--- atomic.c    26 Dec 2017 20:59:44 -0000      1.2
+++ atomic.c    7 Aug 2020 06:06:17 -0000
@@ -1,29 +1,27 @@
-/*===-- atomic.c - Implement support functions for atomic operations.------===
- *
- *                     The LLVM Compiler Infrastructure
- *
- * This file is dual licensed under the MIT and the University of Illinois Open
- * Source Licenses. See LICENSE.TXT for details.
- *
- *===----------------------------------------------------------------------===
- *
- *  atomic.c defines a set of functions for performing atomic accesses on
- *  arbitrary-sized memory locations.  This design uses locks that should
- *  be fast in the uncontended case, for two reasons:
- * 
- *  1) This code must work with C programs that do not link to anything
- *     (including pthreads) and so it should not depend on any pthread
- *     functions.
- *  2) Atomic operations, rather than explicit mutexes, are most commonly used
- *     on code where contended operations are rate.
- * 
- *  To avoid needing a per-object lock, this code allocates an array of
- *  locks and hashes the object pointers to find the one that it should use.
- *  For operations that must be atomic on two locations, the lower lock is
- *  always acquired first, to avoid deadlock.
- *
- *===----------------------------------------------------------------------===
- */
+//===-- atomic.c - Implement support functions for atomic operations.------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+//  atomic.c defines a set of functions for performing atomic accesses on
+//  arbitrary-sized memory locations.  This design uses locks that should
+//  be fast in the uncontended case, for two reasons:
+//
+//  1) This code must work with C programs that do not link to anything
+//     (including pthreads) and so it should not depend on any pthread
+//     functions.
+//  2) Atomic operations, rather than explicit mutexes, are most commonly used
+//     on code where contended operations are rate.
+//
+//  To avoid needing a per-object lock, this code allocates an array of
+//  locks and hashes the object pointers to find the one that it should use.
+//  For operations that must be atomic on two locations, the lower lock is
+//  always acquired first, to avoid deadlock.
+//
+//===----------------------------------------------------------------------===//
 
 #include <stdint.h>
 #include <string.h>
@@ -35,13 +33,14 @@
 #pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
 #pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
 #pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
-#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(__atomic_compare_exchange)
+#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(              
\
+    __atomic_compare_exchange)
 
 /// Number of locks.  This allocates one page on 32-bit platforms, two on
 /// 64-bit.  This can be specified externally if a different trade between
 /// memory usage and contention probability is required for a given platform.
 #ifndef SPINLOCK_COUNT
-#define SPINLOCK_COUNT (1<<10)
+#define SPINLOCK_COUNT (1 << 10)
 #endif
 static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
 
@@ -52,38 +51,37 @@ static const long SPINLOCK_MASK = SPINLO
 
////////////////////////////////////////////////////////////////////////////////
 #ifdef __FreeBSD__
 #include <errno.h>
+// clang-format off
 #include <sys/types.h>
 #include <machine/atomic.h>
 #include <sys/umtx.h>
+// clang-format on
 typedef struct _usem Lock;
 __inline static void unlock(Lock *l) {
-  __c11_atomic_store((_Atomic(uint32_t)*)&l->_count, 1, __ATOMIC_RELEASE);
+  __c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
   __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
   if (l->_has_waiters)
-      _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
+    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
 }
 __inline static void lock(Lock *l) {
   uint32_t old = 1;
-  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t)*)&l->_count, &old,
-        0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
+                                             &old, 0, __ATOMIC_ACQUIRE,
+                                             __ATOMIC_RELAXED)) {
     _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
     old = 1;
   }
 }
 /// locks for atomic operations
-static Lock locks[SPINLOCK_COUNT] = { [0 ...  SPINLOCK_COUNT-1] = {0,1,0} };
+static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};
 
 #elif defined(__APPLE__)
 #include <libkern/OSAtomic.h>
 typedef OSSpinLock Lock;
-__inline static void unlock(Lock *l) {
-  OSSpinLockUnlock(l);
-}
+__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
 /// Locks a lock.  In the current implementation, this is potentially
 /// unbounded in the contended case.
-__inline static void lock(Lock *l) {
-  OSSpinLockLock(l);
-}
+__inline static void lock(Lock *l) { OSSpinLockLock(l); }
 static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0
 
 #else
@@ -97,20 +95,19 @@ __inline static void unlock(Lock *l) {
 __inline static void lock(Lock *l) {
   uintptr_t old = 0;
   while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
-        __ATOMIC_RELAXED))
+                                             __ATOMIC_RELAXED))
     old = 0;
 }
 /// locks for atomic operations
 static Lock locks[SPINLOCK_COUNT];
 #endif
 
-
-/// Returns a lock to use for a given pointer.  
+/// Returns a lock to use for a given pointer.
 static __inline Lock *lock_for_pointer(void *ptr) {
   intptr_t hash = (intptr_t)ptr;
   // Disregard the lowest 4 bits.  We want all values that may be part of the
   // same memory operation to hash to the same value and therefore use the same
-  // lock.  
+  // lock.
   hash >>= 4;
   // Use the next bits as the basis for the hash
   intptr_t low = hash & SPINLOCK_MASK;
@@ -133,36 +130,44 @@ static __inline Lock *lock_for_pointer(v
 
 /// Macro that calls the compiler-generated lock-free versions of functions
 /// when they exist.
-#define LOCK_FREE_CASES() \
-  do {\
-  switch (size) {\
-    case 2:\
-      if (IS_LOCK_FREE_2) {\
-        LOCK_FREE_ACTION(uint16_t);\
-      }\
-    case 4:\
-      if (IS_LOCK_FREE_4) {\
-        LOCK_FREE_ACTION(uint32_t);\
-      }\
-    case 8:\
-      if (IS_LOCK_FREE_8) {\
-        LOCK_FREE_ACTION(uint64_t);\
-      }\
-    case 16:\
-      if (IS_LOCK_FREE_16) {\
-        /* FIXME: __uint128_t isn't available on 32 bit platforms.
-        LOCK_FREE_ACTION(__uint128_t);*/\
-      }\
-  }\
+#define LOCK_FREE_CASES()                                                      
\
+  do {                                                                         
\
+    switch (size) {                                                            
\
+    case 1:                                                                    
\
+      if (IS_LOCK_FREE_1) {                                                    
\
+        LOCK_FREE_ACTION(uint8_t);                                             
\
+      }                                                                        
\
+      break;                                                                   
\
+    case 2:                                                                    
\
+      if (IS_LOCK_FREE_2) {                                                    
\
+        LOCK_FREE_ACTION(uint16_t);                                            
\
+      }                                                                        
\
+      break;                                                                   
\
+    case 4:                                                                    
\
+      if (IS_LOCK_FREE_4) {                                                    
\
+        LOCK_FREE_ACTION(uint32_t);                                            
\
+      }                                                                        
\
+      break;                                                                   
\
+    case 8:                                                                    
\
+      if (IS_LOCK_FREE_8) {                                                    
\
+        LOCK_FREE_ACTION(uint64_t);                                            
\
+      }                                                                        
\
+      break;                                                                   
\
+    case 16:                                                                   
\
+      if (IS_LOCK_FREE_16) {                                                   
\
+        /* FIXME: __uint128_t isn't available on 32 bit platforms.             
\
+        LOCK_FREE_ACTION(__uint128_t);*/                                       
\
+      }                                                                        
\
+      break;                                                                   
\
+    }                                                                          
\
   } while (0)
 
-
 /// An atomic load operation.  This is atomic with respect to the source
 /// pointer only.
 void __atomic_load_c(int size, void *src, void *dest, int model) {
-#define LOCK_FREE_ACTION(type) \
-    *((type*)dest) = __c11_atomic_load((_Atomic(type)*)src, model);\
-    return;
+#define LOCK_FREE_ACTION(type)                                                 
\
+  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model);            
\
+  return;
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
   Lock *l = lock_for_pointer(src);
@@ -174,9 +179,9 @@ void __atomic_load_c(int size, void *src
 /// An atomic store operation.  This is atomic with respect to the destination
 /// pointer only.
 void __atomic_store_c(int size, void *dest, void *src, int model) {
-#define LOCK_FREE_ACTION(type) \
-    __c11_atomic_store((_Atomic(type)*)dest, *(type*)dest, model);\
-    return;
+#define LOCK_FREE_ACTION(type)                                                 
\
+  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model);              
\
+  return;
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
   Lock *l = lock_for_pointer(dest);
@@ -189,12 +194,13 @@ void __atomic_store_c(int size, void *de
 /// to the value at *expected, then this copies value at *desired to *ptr.  If
 /// they  are not, then this stores the current value from *ptr in *expected.
 ///
-/// This function returns 1 if the exchange takes place or 0 if it fails. 
+/// This function returns 1 if the exchange takes place or 0 if it fails.
 int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
-    void *desired, int success, int failure) {
-#define LOCK_FREE_ACTION(type) \
-  return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, (type*)expected,\
-      *(type*)desired, success, failure)
+                                void *desired, int success, int failure) {
+#define LOCK_FREE_ACTION(type)                                                 
\
+  return __c11_atomic_compare_exchange_strong(                                 
\
+      (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success,       
\
+      failure)
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
   Lock *l = lock_for_pointer(ptr);
@@ -212,10 +218,10 @@ int __atomic_compare_exchange_c(int size
 /// Performs an atomic exchange operation between two pointers.  This is atomic
 /// with respect to the target address.
 void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
-#define LOCK_FREE_ACTION(type) \
-    *(type*)old = __c11_atomic_exchange((_Atomic(type)*)ptr, *(type*)val,\
-        model);\
-    return;
+#define LOCK_FREE_ACTION(type)                                                 
\
+  *(type *)old =                                                               
\
+      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model);        
\
+  return;
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
   Lock *l = lock_for_pointer(ptr);
@@ -230,96 +236,96 @@ void __atomic_exchange_c(int size, void 
 // specialised versions of the above functions.
 
////////////////////////////////////////////////////////////////////////////////
 #ifdef __SIZEOF_INT128__
-#define OPTIMISED_CASES\
-  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
-  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
-  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
-  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
+#define OPTIMISED_CASES                                                        
\
+  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                   
\
+  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                  
\
+  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                  
\
+  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)                                  
\
   OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
 #else
-#define OPTIMISED_CASES\
-  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
-  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
-  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
+#define OPTIMISED_CASES                                                        
\
+  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                   
\
+  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                  
\
+  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                  
\
   OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
 #endif
 
-#define OPTIMISED_CASE(n, lockfree, type)\
-type __atomic_load_##n(type *src, int model) {\
-  if (lockfree)\
-    return __c11_atomic_load((_Atomic(type)*)src, model);\
-  Lock *l = lock_for_pointer(src);\
-  lock(l);\
-  type val = *src;\
-  unlock(l);\
-  return val;\
-}
+#define OPTIMISED_CASE(n, lockfree, type)                                      
\
+  type __atomic_load_##n(type *src, int model) {                               
\
+    if (lockfree)                                                              
\
+      return __c11_atomic_load((_Atomic(type) *)src, model);                   
\
+    Lock *l = lock_for_pointer(src);                                           
\
+    lock(l);                                                                   
\
+    type val = *src;                                                           
\
+    unlock(l);                                                                 
\
+    return val;                                                                
\
+  }
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
 
-#define OPTIMISED_CASE(n, lockfree, type)\
-void  __atomic_store_##n(type *dest, type val, int model) {\
-  if (lockfree) {\
-    __c11_atomic_store((_Atomic(type)*)dest, val, model);\
-    return;\
-  }\
-  Lock *l = lock_for_pointer(dest);\
-  lock(l);\
-  *dest = val;\
-  unlock(l);\
-  return;\
-}
+#define OPTIMISED_CASE(n, lockfree, type)                                      
\
+  void __atomic_store_##n(type *dest, type val, int model) {                   
\
+    if (lockfree) {                                                            
\
+      __c11_atomic_store((_Atomic(type) *)dest, val, model);                   
\
+      return;                                                                  
\
+    }                                                                          
\
+    Lock *l = lock_for_pointer(dest);                                          
\
+    lock(l);                                                                   
\
+    *dest = val;                                                               
\
+    unlock(l);                                                                 
\
+    return;                                                                    
\
+  }
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
 
-#define OPTIMISED_CASE(n, lockfree, type)\
-type __atomic_exchange_##n(type *dest, type val, int model) {\
-  if (lockfree)\
-    return __c11_atomic_exchange((_Atomic(type)*)dest, val, model);\
-  Lock *l = lock_for_pointer(dest);\
-  lock(l);\
-  type tmp = *dest;\
-  *dest = val;\
-  unlock(l);\
-  return tmp;\
-}
+#define OPTIMISED_CASE(n, lockfree, type)                                      
\
+  type __atomic_exchange_##n(type *dest, type val, int model) {                
\
+    if (lockfree)                                                              
\
+      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model);         
\
+    Lock *l = lock_for_pointer(dest);                                          
\
+    lock(l);                                                                   
\
+    type tmp = *dest;                                                          
\
+    *dest = val;                                                               
\
+    unlock(l);                                                                 
\
+    return tmp;                                                                
\
+  }
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
 
-#define OPTIMISED_CASE(n, lockfree, type)\
-int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,\
-    int success, int failure) {\
-  if (lockfree)\
-    return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, expected, desired,\
-        success, failure);\
-  Lock *l = lock_for_pointer(ptr);\
-  lock(l);\
-  if (*ptr == *expected) {\
-    *ptr = desired;\
-    unlock(l);\
-    return 1;\
-  }\
-  *expected = *ptr;\
-  unlock(l);\
-  return 0;\
-}
+#define OPTIMISED_CASE(n, lockfree, type)                                      
\
+  int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,   
\
+                                    int success, int failure) {                
\
+    if (lockfree)                                                              
\
+      return __c11_atomic_compare_exchange_strong(                             
\
+          (_Atomic(type) *)ptr, expected, desired, success, failure);          
\
+    Lock *l = lock_for_pointer(ptr);                                           
\
+    lock(l);                                                                   
\
+    if (*ptr == *expected) {                                                   
\
+      *ptr = desired;                                                          
\
+      unlock(l);                                                               
\
+      return 1;                                                                
\
+    }                                                                          
\
+    *expected = *ptr;                                                          
\
+    unlock(l);                                                                 
\
+    return 0;                                                                  
\
+  }
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
 
 
////////////////////////////////////////////////////////////////////////////////
 // Atomic read-modify-write operations for integers of various sizes.
 
////////////////////////////////////////////////////////////////////////////////
-#define ATOMIC_RMW(n, lockfree, type, opname, op) \
-type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {\
-  if (lockfree) \
-    return __c11_atomic_fetch_##opname((_Atomic(type)*)ptr, val, model);\
-  Lock *l = lock_for_pointer(ptr);\
-  lock(l);\
-  type tmp = *ptr;\
-  *ptr = tmp op val;\
-  unlock(l);\
-  return tmp;\
-}
+#define ATOMIC_RMW(n, lockfree, type, opname, op)                              
\
+  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {         
\
+    if (lockfree)                                                              
\
+      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model);    
\
+    Lock *l = lock_for_pointer(ptr);                                           
\
+    lock(l);                                                                   
\
+    type tmp = *ptr;                                                           
\
+    *ptr = tmp op val;                                                         
\
+    unlock(l);                                                                 
\
+    return tmp;                                                                
\
+  }
 
 #define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
 OPTIMISED_CASES

Reply via email to