This patch changes libgo to use the __atomic intrinsics instead of the older __sync intrinsics. libgo already used some __atomic calls; this replaces all the __sync calls. GCC has supported the __atomic intrinsics since 4.7. They are better than the __sync intrinsics in that they specify a memory model and, more importantly for our purposes, they are reliably implemented either in the compiler or in libatomic. This fixes the reopened GCC PR 52084. Bootstrapped and ran Go testsuite on x86_64-pc-linux-gnu. Committed to mainline.
Ian
Index: gcc/go/gofrontend/MERGE =================================================================== --- gcc/go/gofrontend/MERGE (revision 268450) +++ gcc/go/gofrontend/MERGE (working copy) @@ -1,4 +1,4 @@ -cbcc538adc5177778da5788d1101e16f106a1514 +c49ad6c4e66fa7ca992d947a5f0377090abadf6b The first line of this file holds the git revision number of the last merge done from the gofrontend repository. Index: libgo/Makefile.am =================================================================== --- libgo/Makefile.am (revision 268369) +++ libgo/Makefile.am (working copy) @@ -471,7 +471,6 @@ runtime_files = \ runtime/proc.c \ runtime/runtime_c.c \ runtime/stack.c \ - runtime/thread.c \ runtime/yield.c \ $(rtems_task_variable_add_file) \ $(runtime_getncpu_file) Index: libgo/configure.ac =================================================================== --- libgo/configure.ac (revision 268369) +++ libgo/configure.ac (working copy) @@ -578,62 +578,6 @@ LIBS="$LIBS $MATH_LIBS" AC_CHECK_FUNCS(matherr) LIBS="$LIBS_hold" -AC_CACHE_CHECK([for __sync_bool_compare_and_swap_4], -[libgo_cv_func___sync_bool_compare_and_swap_4], -[AC_LINK_IFELSE([AC_LANG_SOURCE([ -typedef unsigned int uint32 __attribute__ ((mode (SI))); -uint32 i; -int main() { return __sync_bool_compare_and_swap (&i, 0, 1); } -])], -[libgo_cv_func___sync_bool_compare_and_swap_4=yes], -[libgo_cv_func___sync_bool_compare_and_swap_4=no])]) -if test "$libgo_cv_func___sync_bool_compare_and_swap_4" = "yes"; then - AC_DEFINE(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_4, 1, - [Define to 1 if the compiler provides the __sync_bool_compare_and_swap function for uint32]) -fi - -AC_CACHE_CHECK([for __sync_bool_compare_and_swap_8], -[libgo_cv_func___sync_bool_compare_and_swap_8], -[AC_LINK_IFELSE([AC_LANG_SOURCE([ -typedef unsigned int uint64 __attribute__ ((mode (DI))); -uint64 i; -int main() { return __sync_bool_compare_and_swap (&i, 0, 1); } -])], -[libgo_cv_func___sync_bool_compare_and_swap_8=yes], 
-[libgo_cv_func___sync_bool_compare_and_swap_8=no])]) -if test "$libgo_cv_func___sync_bool_compare_and_swap_8" = "yes"; then - AC_DEFINE(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_8, 1, - [Define to 1 if the compiler provides the __sync_bool_compare_and_swap function for uint64]) -fi - -AC_CACHE_CHECK([for __sync_fetch_and_add_4], -[libgo_cv_func___sync_fetch_and_add_4], -[AC_LINK_IFELSE([AC_LANG_SOURCE([ -typedef unsigned int uint32 __attribute__ ((mode (SI))); -uint32 i; -int main() { return __sync_fetch_and_add (&i, 1); } -])], -[libgo_cv_func___sync_fetch_and_add_4=yes], -[libgo_cv_func___sync_fetch_and_add_4=no])]) -if test "$libgo_cv_func___sync_fetch_and_add_4" = "yes"; then - AC_DEFINE(HAVE_SYNC_FETCH_AND_ADD_4, 1, - [Define to 1 if the compiler provides the __sync_fetch_and_add function for uint32]) -fi - -AC_CACHE_CHECK([for __sync_add_and_fetch_8], -[libgo_cv_func___sync_add_and_fetch_8], -[AC_LINK_IFELSE([AC_LANG_SOURCE([ -typedef unsigned int uint64 __attribute__ ((mode (DI))); -uint64 i; -int main() { return __sync_add_and_fetch (&i, 1); } -])], -[libgo_cv_func___sync_add_and_fetch_8=yes], -[libgo_cv_func___sync_add_and_fetch_8=no])]) -if test "$libgo_cv_func___sync_add_and_fetch_8" = "yes"; then - AC_DEFINE(HAVE_SYNC_ADD_AND_FETCH_8, 1, - [Define to 1 if the compiler provides the __sync_add_and_fetch function for uint64]) -fi - dnl For x86 we want to use the -minline-all-stringops option to avoid dnl forcing a stack split when calling memcpy and friends. 
AC_CACHE_CHECK([whether compiler supports -minline-all-stringops], Index: libgo/go/runtime/testdata/testprogcgo/lockosthread.c =================================================================== --- libgo/go/runtime/testdata/testprogcgo/lockosthread.c (revision 268369) +++ libgo/go/runtime/testdata/testprogcgo/lockosthread.c (working copy) @@ -9,5 +9,5 @@ uint32_t threadExited; void setExited(void *x) { - __sync_fetch_and_add(&threadExited, 1); + __atomic_add_fetch(&threadExited, 1, __ATOMIC_SEQ_CST); } Index: libgo/go/runtime/testdata/testprogcgo/threadpprof.go =================================================================== --- libgo/go/runtime/testdata/testprogcgo/threadpprof.go (revision 268369) +++ libgo/go/runtime/testdata/testprogcgo/threadpprof.go (working copy) @@ -50,13 +50,13 @@ void pprofCgoThreadTraceback(void* parg) arg->buf[0] = (uintptr_t)(cpuHogThread) + 0x10; arg->buf[1] = (uintptr_t)(cpuHogThread2) + 0x4; arg->buf[2] = 0; - __sync_add_and_fetch(&cpuHogThreadCount, 1); + __atomic_add_fetch(&cpuHogThreadCount, 1, __ATOMIC_SEQ_CST); } // getCPUHogThreadCount fetches the number of times we've seen cpuHogThread // in the traceback. 
int getCPUHogThreadCount() { - return __sync_add_and_fetch(&cpuHogThreadCount, 0); + return __atomic_load_n(&cpuHogThreadCount, __ATOMIC_SEQ_CST); } static void* cpuHogDriver(void* arg __attribute__ ((unused))) { Index: libgo/go/runtime/testdata/testprogcgo/tracebackctxt_c.c =================================================================== --- libgo/go/runtime/testdata/testprogcgo/tracebackctxt_c.c (revision 268369) +++ libgo/go/runtime/testdata/testprogcgo/tracebackctxt_c.c (working copy) @@ -49,18 +49,18 @@ struct cgoSymbolizerArg { static int contextCount; int getContextCount() { - return __sync_add_and_fetch(&contextCount, 0); + return __atomic_load_n(&contextCount, __ATOMIC_SEQ_CST); } void tcContext(void* parg) { struct cgoContextArg* arg = (struct cgoContextArg*)(parg); if (arg->context == 0) { - arg->context = __sync_add_and_fetch(&contextCount, 1); + arg->context = __atomic_add_fetch(&contextCount, 1, __ATOMIC_SEQ_CST); } else { - if (arg->context != __sync_add_and_fetch(&contextCount, 0)) { + if (arg->context != __atomic_load_n(&contextCount, __ATOMIC_SEQ_CST)) { abort(); } - __sync_sub_and_fetch(&contextCount, 1); + __atomic_sub_fetch(&contextCount, 1, __ATOMIC_SEQ_CST); } } Index: libgo/go/sync/atomic/atomic.c =================================================================== --- libgo/go/sync/atomic/atomic.c (revision 268369) +++ libgo/go/sync/atomic/atomic.c (working copy) @@ -69,7 +69,8 @@ _Bool CompareAndSwapInt32 (int32_t *, in _Bool CompareAndSwapInt32 (int32_t *val, int32_t old, int32_t new) { - return __sync_bool_compare_and_swap (val, old, new); + return __atomic_compare_exchange_n (val, &old, new, true, __ATOMIC_SEQ_CST, + __ATOMIC_RELAXED); } _Bool CompareAndSwapInt64 (int64_t *, int64_t, int64_t) @@ -81,7 +82,8 @@ CompareAndSwapInt64 (int64_t *val, int64 { if (((uintptr_t) val & 7) != 0) val = NULL; - return __sync_bool_compare_and_swap (val, old, new); + return __atomic_compare_exchange_n (val, &old, new, true, __ATOMIC_SEQ_CST, +
__ATOMIC_RELAXED); } _Bool CompareAndSwapUint32 (uint32_t *, uint32_t, uint32_t) @@ -91,7 +93,8 @@ _Bool CompareAndSwapUint32 (uint32_t *, _Bool CompareAndSwapUint32 (uint32_t *val, uint32_t old, uint32_t new) { - return __sync_bool_compare_and_swap (val, old, new); + return __atomic_compare_exchange_n (val, &old, new, true, __ATOMIC_SEQ_CST, + __ATOMIC_RELAXED); } _Bool CompareAndSwapUint64 (uint64_t *, uint64_t, uint64_t) @@ -103,7 +106,8 @@ CompareAndSwapUint64 (uint64_t *val, uin { if (((uintptr_t) val & 7) != 0) val = NULL; - return __sync_bool_compare_and_swap (val, old, new); + return __atomic_compare_exchange_n (val, &old, new, true, __ATOMIC_SEQ_CST, + __ATOMIC_RELAXED); } _Bool CompareAndSwapUintptr (uintptr_t *, uintptr_t, uintptr_t) @@ -113,7 +117,8 @@ _Bool CompareAndSwapUintptr (uintptr_t * _Bool CompareAndSwapUintptr (uintptr_t *val, uintptr_t old, uintptr_t new) { - return __sync_bool_compare_and_swap (val, old, new); + return __atomic_compare_exchange_n (val, &old, new, true, __ATOMIC_SEQ_CST, + __ATOMIC_RELAXED); } int32_t AddInt32 (int32_t *, int32_t) @@ -123,7 +128,7 @@ int32_t AddInt32 (int32_t *, int32_t) int32_t AddInt32 (int32_t *val, int32_t delta) { - return __sync_add_and_fetch (val, delta); + return __atomic_add_fetch (val, delta, __ATOMIC_SEQ_CST); } uint32_t AddUint32 (uint32_t *, uint32_t) @@ -133,7 +138,7 @@ uint32_t AddUint32 (uint32_t *, uint32_t uint32_t AddUint32 (uint32_t *val, uint32_t delta) { - return __sync_add_and_fetch (val, delta); + return __atomic_add_fetch (val, delta, __ATOMIC_SEQ_CST); } int64_t AddInt64 (int64_t *, int64_t) @@ -145,7 +150,7 @@ AddInt64 (int64_t *val, int64_t delta) { if (((uintptr_t) val & 7) != 0) val = NULL; - return __sync_add_and_fetch (val, delta); + return __atomic_add_fetch (val, delta, __ATOMIC_SEQ_CST); } uint64_t AddUint64 (uint64_t *, uint64_t) @@ -157,7 +162,7 @@ AddUint64 (uint64_t *val, uint64_t delta { if (((uintptr_t) val & 7) != 0) val = NULL; - return __sync_add_and_fetch (val, 
delta); + return __atomic_add_fetch (val, delta, __ATOMIC_SEQ_CST); } uintptr_t AddUintptr (uintptr_t *, uintptr_t) @@ -167,7 +172,7 @@ uintptr_t AddUintptr (uintptr_t *, uintp uintptr_t AddUintptr (uintptr_t *val, uintptr_t delta) { - return __sync_add_and_fetch (val, delta); + return __atomic_add_fetch (val, delta, __ATOMIC_SEQ_CST); } int32_t LoadInt32 (int32_t *addr) @@ -177,12 +182,7 @@ int32_t LoadInt32 (int32_t *addr) int32_t LoadInt32 (int32_t *addr) { - int32_t v; - - v = *addr; - while (! __sync_bool_compare_and_swap (addr, v, v)) - v = *addr; - return v; + return __atomic_load_n (addr, __ATOMIC_SEQ_CST); } int64_t LoadInt64 (int64_t *addr) @@ -192,14 +192,9 @@ int64_t LoadInt64 (int64_t *addr) int64_t LoadInt64 (int64_t *addr) { - int64_t v; - if (((uintptr_t) addr & 7) != 0) panicmem (); - v = *addr; - while (! __sync_bool_compare_and_swap (addr, v, v)) - v = *addr; - return v; + return __atomic_load_n (addr, __ATOMIC_SEQ_CST); } uint32_t LoadUint32 (uint32_t *addr) @@ -209,12 +204,7 @@ uint32_t LoadUint32 (uint32_t *addr) uint32_t LoadUint32 (uint32_t *addr) { - uint32_t v; - - v = *addr; - while (! __sync_bool_compare_and_swap (addr, v, v)) - v = *addr; - return v; + return __atomic_load_n (addr, __ATOMIC_SEQ_CST); } uint64_t LoadUint64 (uint64_t *addr) @@ -224,14 +214,9 @@ uint64_t LoadUint64 (uint64_t *addr) uint64_t LoadUint64 (uint64_t *addr) { - uint64_t v; - if (((uintptr_t) addr & 7) != 0) panicmem (); - v = *addr; - while (! __sync_bool_compare_and_swap (addr, v, v)) - v = *addr; - return v; + return __atomic_load_n (addr, __ATOMIC_SEQ_CST); } uintptr_t LoadUintptr (uintptr_t *addr) @@ -241,12 +226,7 @@ uintptr_t LoadUintptr (uintptr_t *addr) uintptr_t LoadUintptr (uintptr_t *addr) { - uintptr_t v; - - v = *addr; - while (! 
__sync_bool_compare_and_swap (addr, v, v)) - v = *addr; - return v; + return __atomic_load_n (addr, __ATOMIC_SEQ_CST); } void *LoadPointer (void **addr) @@ -256,12 +236,7 @@ void *LoadPointer (void **addr) void * LoadPointer (void **addr) { - void *v; - - v = *addr; - while (! __sync_bool_compare_and_swap (addr, v, v)) - v = *addr; - return v; + return __atomic_load_n (addr, __ATOMIC_SEQ_CST); } void StoreInt32 (int32_t *addr, int32_t val) @@ -271,11 +246,7 @@ void StoreInt32 (int32_t *addr, int32_t void StoreInt32 (int32_t *addr, int32_t val) { - int32_t v; - - v = *addr; - while (! __sync_bool_compare_and_swap (addr, v, val)) - v = *addr; + __atomic_store_n (addr, val, __ATOMIC_SEQ_CST); } void StoreInt64 (int64_t *addr, int64_t val) @@ -285,13 +256,9 @@ void StoreInt64 (int64_t *addr, int64_t void StoreInt64 (int64_t *addr, int64_t val) { - int64_t v; - if (((uintptr_t) addr & 7) != 0) panicmem (); - v = *addr; - while (! __sync_bool_compare_and_swap (addr, v, val)) - v = *addr; + __atomic_store_n (addr, val, __ATOMIC_SEQ_CST); } void StoreUint32 (uint32_t *addr, uint32_t val) @@ -301,11 +268,7 @@ void StoreUint32 (uint32_t *addr, uint32 void StoreUint32 (uint32_t *addr, uint32_t val) { - uint32_t v; - - v = *addr; - while (! __sync_bool_compare_and_swap (addr, v, val)) - v = *addr; + __atomic_store_n (addr, val, __ATOMIC_SEQ_CST); } void StoreUint64 (uint64_t *addr, uint64_t val) @@ -315,13 +278,9 @@ void StoreUint64 (uint64_t *addr, uint64 void StoreUint64 (uint64_t *addr, uint64_t val) { - uint64_t v; - if (((uintptr_t) addr & 7) != 0) panicmem (); - v = *addr; - while (! __sync_bool_compare_and_swap (addr, v, val)) - v = *addr; + __atomic_store_n (addr, val, __ATOMIC_SEQ_CST); } void StoreUintptr (uintptr_t *addr, uintptr_t val) @@ -331,9 +290,5 @@ void StoreUintptr (uintptr_t *addr, uint void StoreUintptr (uintptr_t *addr, uintptr_t val) { - uintptr_t v; - - v = *addr; - while (! 
__sync_bool_compare_and_swap (addr, v, val)) - v = *addr; + __atomic_store_n (addr, val, __ATOMIC_SEQ_CST); } Index: libgo/go/sync/cas.c =================================================================== --- libgo/go/sync/cas.c (revision 268369) +++ libgo/go/sync/cas.c (nonexistent) @@ -1,17 +0,0 @@ -/* cas.c -- implement sync.cas for Go. - - Copyright 2009 The Go Authors. All rights reserved. - Use of this source code is governed by a BSD-style - license that can be found in the LICENSE file. */ - -#include <stdint.h> - -#include "runtime.h" - -_Bool cas (int32_t *, int32_t, int32_t) __asm__ (GOSYM_PREFIX "libgo_sync.sync.cas"); - -_Bool -cas (int32_t *ptr, int32_t old, int32_t new) -{ - return __sync_bool_compare_and_swap (ptr, old, new); -} Index: libgo/misc/cgo/test/issue7978.go =================================================================== --- libgo/misc/cgo/test/issue7978.go (revision 268369) +++ libgo/misc/cgo/test/issue7978.go (working copy) @@ -12,33 +12,18 @@ package cgotest void issue7978cb(void); -#if defined(__APPLE__) && defined(__arm__) -// on Darwin/ARM, libSystem doesn't provide implementation of the __sync_fetch_and_add -// primitive, and although gcc supports it, it doesn't inline its definition. -// Clang could inline its definition, so we require clang on Darwin/ARM. 
-#if defined(__clang__) -#define HAS_SYNC_FETCH_AND_ADD 1 -#else -#define HAS_SYNC_FETCH_AND_ADD 0 -#endif -#else -#define HAS_SYNC_FETCH_AND_ADD 1 -#endif - // use ugly atomic variable sync since that doesn't require calling back into // Go code or OS dependencies static void issue7978c(uint32_t *sync) { -#if HAS_SYNC_FETCH_AND_ADD - while(__sync_fetch_and_add(sync, 0) != 0) + while(__atomic_load_n(sync, __ATOMIC_SEQ_CST) != 0) ; - __sync_fetch_and_add(sync, 1); - while(__sync_fetch_and_add(sync, 0) != 2) + __atomic_add_fetch(sync, 1, __ATOMIC_SEQ_CST); + while(__atomic_load_n(sync, __ATOMIC_SEQ_CST) != 2) ; issue7978cb(); - __sync_fetch_and_add(sync, 1); - while(__sync_fetch_and_add(sync, 0) != 6) + __atomic_add_fetch(sync, 1, __ATOMIC_SEQ_CST); + while(__atomic_load_n(sync, __ATOMIC_SEQ_CST) != 6) ; -#endif } */ import "C" @@ -111,9 +96,6 @@ func test7978(t *testing.T) { if runtime.Compiler == "gccgo" { t.Skip("gccgo can not do stack traces of C code") } - if C.HAS_SYNC_FETCH_AND_ADD == 0 { - t.Skip("clang required for __sync_fetch_and_add support on darwin/arm") - } debug.SetTraceback("2") issue7978sync = 0 go issue7978go() Index: libgo/runtime/runtime.h =================================================================== --- libgo/runtime/runtime.h (revision 268369) +++ libgo/runtime/runtime.h (working copy) @@ -276,22 +276,8 @@ int32 runtime_timediv(int64, int32, int3 int32 runtime_round2(int32 x); // round x up to a power of 2. // atomic operations -#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new) -#define runtime_cas64(pval, old, new) __sync_bool_compare_and_swap (pval, old, new) -#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new) -// Don't confuse with XADD x86 instruction, -// this one is actually 'addx', that is, add-and-fetch. 
-#define runtime_xadd(p, v) __sync_add_and_fetch (p, v) -#define runtime_xadd64(p, v) __sync_add_and_fetch (p, v) -#define runtime_xchg(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST) -#define runtime_xchg64(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST) -#define runtime_xchgp(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST) +#define runtime_xadd(p, v) __atomic_add_fetch (p, v, __ATOMIC_SEQ_CST) #define runtime_atomicload(p) __atomic_load_n (p, __ATOMIC_SEQ_CST) -#define runtime_atomicstore(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST) -#define runtime_atomicstore64(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST) -#define runtime_atomicload64(p) __atomic_load_n (p, __ATOMIC_SEQ_CST) -#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST) -#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST) void runtime_setg(G*) __asm__ (GOSYM_PREFIX "runtime.setg"); Index: libgo/runtime/thread.c =================================================================== --- libgo/runtime/thread.c (revision 268369) +++ libgo/runtime/thread.c (nonexistent) @@ -1,161 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include <errno.h> -#include <signal.h> -#include <sys/time.h> -#include <sys/resource.h> - -#include "runtime.h" -#include "go-assert.h" - -/* For targets which don't have the required sync support. Really - these should be provided by gcc itself. FIXME. 
*/ - -#if !defined (HAVE_SYNC_BOOL_COMPARE_AND_SWAP_4) || !defined (HAVE_SYNC_BOOL_COMPARE_AND_SWAP_8) || !defined (HAVE_SYNC_FETCH_AND_ADD_4) || !defined (HAVE_SYNC_ADD_AND_FETCH_8) - -static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER; - -#endif - -#ifndef HAVE_SYNC_BOOL_COMPARE_AND_SWAP_4 - -_Bool -__sync_bool_compare_and_swap_4 (uint32*, uint32, uint32) - __attribute__ ((visibility ("hidden"))); - -_Bool -__sync_bool_compare_and_swap_4 (uint32* ptr, uint32 old, uint32 new) -{ - int i; - _Bool ret; - - i = pthread_mutex_lock (&sync_lock); - __go_assert (i == 0); - - if (*ptr != old) - ret = 0; - else - { - *ptr = new; - ret = 1; - } - - i = pthread_mutex_unlock (&sync_lock); - __go_assert (i == 0); - - return ret; -} - -#endif - -#ifndef HAVE_SYNC_BOOL_COMPARE_AND_SWAP_8 - -_Bool -__sync_bool_compare_and_swap_8 (uint64*, uint64, uint64) - __attribute__ ((visibility ("hidden"))); - -_Bool -__sync_bool_compare_and_swap_8 (uint64* ptr, uint64 old, uint64 new) -{ - int i; - _Bool ret; - - i = pthread_mutex_lock (&sync_lock); - __go_assert (i == 0); - - if (*ptr != old) - ret = 0; - else - { - *ptr = new; - ret = 1; - } - - i = pthread_mutex_unlock (&sync_lock); - __go_assert (i == 0); - - return ret; -} - -#endif - -#ifndef HAVE_SYNC_FETCH_AND_ADD_4 - -uint32 -__sync_fetch_and_add_4 (uint32*, uint32) - __attribute__ ((visibility ("hidden"))); - -uint32 -__sync_fetch_and_add_4 (uint32* ptr, uint32 add) -{ - int i; - uint32 ret; - - i = pthread_mutex_lock (&sync_lock); - __go_assert (i == 0); - - ret = *ptr; - *ptr += add; - - i = pthread_mutex_unlock (&sync_lock); - __go_assert (i == 0); - - return ret; -} - -#endif - -#ifndef HAVE_SYNC_ADD_AND_FETCH_8 - -uint64 -__sync_add_and_fetch_8 (uint64*, uint64) - __attribute__ ((visibility ("hidden"))); - -uint64 -__sync_add_and_fetch_8 (uint64* ptr, uint64 add) -{ - int i; - uint64 ret; - - i = pthread_mutex_lock (&sync_lock); - __go_assert (i == 0); - - *ptr += add; - ret = *ptr; - - i = pthread_mutex_unlock 
(&sync_lock); - __go_assert (i == 0); - - return ret; -} - -#endif - -uintptr -runtime_memlimit(void) -{ - struct rlimit rl; - uintptr used; - - if(getrlimit(RLIMIT_AS, &rl) != 0) - return 0; - if(rl.rlim_cur >= 0x7fffffff) - return 0; - - // Estimate our VM footprint excluding the heap. - // Not an exact science: use size of binary plus - // some room for thread stacks. - used = (64<<20); - if(used >= rl.rlim_cur) - return 0; - - // If there's not at least 16 MB left, we're probably - // not going to be able to do much. Treat as no limit. - rl.rlim_cur -= used; - if(rl.rlim_cur < (16<<20)) - return 0; - - return rl.rlim_cur - used; -}