Changeset: 73563cb8b61f for MonetDB
URL: https://dev.monetdb.org/hg/MonetDB?cmd=changeset;node=73563cb8b61f
Modified Files:
clients/Tests/exports.stable.out
gdk/gdk_atomic.h
gdk/gdk_bbp.c
gdk/gdk_system.c
gdk/gdk_system.h
gdk/gdk_utils.c
monetdb5/mal/mal_dataflow.c
monetdb5/mal/mal_private.h
monetdb5/mal/mal_profiler.c
monetdb5/mal/mal_resource.c
monetdb5/mal/mal_runtime.c
monetdb5/modules/mal/mal_mapi.c
sql/server/rel_updates.c
sql/server/sql_mvc.c
sql/storage/bat/bat_storage.c
sql/storage/sql_storage.h
sql/storage/store.c
Branch: Apr2019
Log Message:
Simplify interface for atomic instructions.
It is no longer necessary to declare locks (mutexes) for atomic
instructions: if necessary, they are created automatically.
diffs (truncated from 1856 to 300 lines):
diff --git a/clients/Tests/exports.stable.out b/clients/Tests/exports.stable.out
--- a/clients/Tests/exports.stable.out
+++ b/clients/Tests/exports.stable.out
@@ -254,11 +254,11 @@ gdk_return GDKgetsemval(int sem_id, int
bool GDKinit(opt *set, int setlen);
void *GDKinitmmap(size_t id, size_t size, size_t *return_size);
BAT *GDKkey;
-ATOMIC_TYPE volatile GDKlockcnt;
-ATOMIC_TYPE volatile GDKlockcontentioncnt;
+ATOMIC_TYPE GDKlockcnt;
+ATOMIC_TYPE GDKlockcontentioncnt;
MT_Lock *volatile GDKlocklist;
-ATOMIC_FLAG volatile GDKlocklistlock;
-ATOMIC_TYPE volatile GDKlocksleepcnt;
+ATOMIC_FLAG GDKlocklistlock;
+ATOMIC_TYPE GDKlocksleepcnt;
void GDKlockstatistics(int);
void *GDKmalloc(size_t size) __attribute__((__malloc__))
__attribute__((__alloc_size__(1))) __attribute__((__warn_unused_result__));
size_t GDKmem_cursize(void);
diff --git a/gdk/gdk_atomic.h b/gdk/gdk_atomic.h
--- a/gdk/gdk_atomic.h
+++ b/gdk/gdk_atomic.h
@@ -11,13 +11,9 @@
* performed in one thread shows up in another thread either
* completely or not at all.
*
- * If the symbol ATOMIC_LOCK is defined, a variable of type MT_Lock
- * must be declared and initialized. The latter can be done using the
- * macro ATOMIC_INIT which expands to nothing if ATOMIC_LOCK is not
- * defined.
- *
* The following operations are defined:
- * ATOMIC_VAR_INIT -- initialize the variable (not necessarily atomic!);
+ * ATOMIC_VAR_INIT -- initializer for the variable (not necessarily atomic!);
+ * ATOMIC_INIT -- initialize the variable (not necessarily atomic!);
* ATOMIC_GET -- return the value of a variable;
* ATOMIC_SET -- set the value of a variable;
* ATOMIC_ADD -- add a value to a variable, return original value;
@@ -48,20 +44,19 @@
#define ATOMIC_TYPE AO_t
#define ATOMIC_VAR_INIT(val) (val)
+#define ATOMIC_INIT(var, val) (*(var) = (val))
-#define ATOMIC_GET(var, lck) AO_load_full(&var)
-#define ATOMIC_SET(var, val, lck) AO_store_full(&var, (AO_t) (val))
-#define ATOMIC_ADD(var, val, lck) AO_fetch_and_add(&var, (AO_t) (val))
-#define ATOMIC_SUB(var, val, lck) AO_fetch_and_add(&var, (AO_t) -(val))
-#define ATOMIC_INC(var, lck) (AO_fetch_and_add1(&var) + 1)
-#define ATOMIC_DEC(var, lck) (AO_fetch_and_sub1(&var) - 1)
-
-#define ATOMIC_INIT(lck) ((void) 0)
+#define ATOMIC_GET(var) AO_load_full(var)
+#define ATOMIC_SET(var, val) AO_store_full(var, (AO_t) (val))
+#define ATOMIC_ADD(var, val) AO_fetch_and_add(var, (AO_t) (val))
+#define ATOMIC_SUB(var, val) AO_fetch_and_add(var, (AO_t) -(val))
+#define ATOMIC_INC(var) (AO_fetch_and_add1(var) + 1)
+#define ATOMIC_DEC(var) (AO_fetch_and_sub1(var) - 1)
#define ATOMIC_FLAG AO_TS_t
#define ATOMIC_FLAG_INIT { AO_TS_INITIALIZER }
-#define ATOMIC_CLEAR(var, lck) AO_CLEAR(&var)
-#define ATOMIC_TAS(var, lck) (AO_test_and_set_full(&var) != AO_TS_CLEAR)
+#define ATOMIC_CLEAR(var) AO_CLEAR(var)
+#define ATOMIC_TAS(var) (AO_test_and_set_full(var) != AO_TS_CLEAR)
#elif defined(HAVE_STDATOMIC_H) && !defined(__INTEL_COMPILER) &&
!defined(__STDC_NO_ATOMICS__) && !defined(NO_ATOMIC_INSTRUCTIONS)
@@ -90,19 +85,18 @@
#define ATOMIC_CAST unsigned long long
#endif
-#define ATOMIC_GET(var, lck) atomic_load(&var)
-#define ATOMIC_SET(var, val, lck) atomic_store(&var, (ATOMIC_CAST) (val))
-#define ATOMIC_ADD(var, val, lck) atomic_fetch_add(&var, (ATOMIC_CAST)
(val))
-#define ATOMIC_SUB(var, val, lck) atomic_fetch_sub(&var, (ATOMIC_CAST)
(val))
-#define ATOMIC_INC(var, lck) (atomic_fetch_add(&var, 1) + 1)
-#define ATOMIC_DEC(var, lck) (atomic_fetch_sub(&var, 1) - 1)
+#define ATOMIC_INIT(var, val) atomic_init(var, (ATOMIC_CAST) (val))
+#define ATOMIC_GET(var) atomic_load(var)
+#define ATOMIC_SET(var, val) atomic_store(var, (ATOMIC_CAST) (val))
+#define ATOMIC_ADD(var, val) atomic_fetch_add(var, (ATOMIC_CAST) (val))
+#define ATOMIC_SUB(var, val) atomic_fetch_sub(var, (ATOMIC_CAST) (val))
+#define ATOMIC_INC(var) (atomic_fetch_add(var, 1) + 1)
+#define ATOMIC_DEC(var) (atomic_fetch_sub(var, 1) - 1)
-#define ATOMIC_INIT(lck) ((void) 0)
-
-#define ATOMIC_FLAG atomic_flag
+#define ATOMIC_FLAG atomic_flag
/* ATOMIC_FLAG_INIT is already defined by the include file */
-#define ATOMIC_CLEAR(var, lck) atomic_flag_clear(&var)
-#define ATOMIC_TAS(var, lck) atomic_flag_test_and_set(&var)
+#define ATOMIC_CLEAR(var) atomic_flag_clear(var)
+#define ATOMIC_TAS(var) atomic_flag_test_and_set(var)
#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER) &&
!defined(NO_ATOMIC_INSTRUCTIONS)
@@ -110,15 +104,17 @@
#if SIZEOF_SSIZE_T == 8
-#define ATOMIC_TYPE int64_t
-#define ATOMIC_VAR_INIT(val) (val)
+#define ATOMIC_TYPE volatile int64_t
+#define ATOMIC_VAR_INIT(val) (val)
+#define ATOMIC_INIT(var, val) (*(var) = (val))
-#define ATOMIC_GET(var, lck) var
-#define ATOMIC_SET(var, val, lck) _InterlockedExchange64(&var, (int64_t)
(val))
-#define ATOMIC_ADD(var, val, lck) _InterlockedExchangeAdd64(&var,
(int64_t) (val))
-#define ATOMIC_SUB(var, val, lck) _InterlockedExchangeAdd64(&var,
-(int64_t) (val))
-#define ATOMIC_INC(var, lck) _InterlockedIncrement64(&var)
-#define ATOMIC_DEC(var, lck) _InterlockedDecrement64(&var)
+#define ATOMIC_GET(var) (*(var))
+/* should we use _InterlockedExchangeAdd64(var, 0) instead? */
+#define ATOMIC_SET(var, val) _InterlockedExchange64(var, (int64_t) (val))
+#define ATOMIC_ADD(var, val) _InterlockedExchangeAdd64(var, (int64_t) (val))
+#define ATOMIC_SUB(var, val) _InterlockedExchangeAdd64(var, -(int64_t) (val))
+#define ATOMIC_INC(var) _InterlockedIncrement64(var)
+#define ATOMIC_DEC(var) _InterlockedDecrement64(var)
#pragma intrinsic(_InterlockedExchange64)
#pragma intrinsic(_InterlockedExchangeAdd64)
@@ -128,15 +124,17 @@
#else
-#define ATOMIC_TYPE int
-#define ATOMIC_VAR_INIT(val) (val)
+#define ATOMIC_TYPE volatile int
+#define ATOMIC_VAR_INIT(val) (val)
+#define ATOMIC_INIT(var, val) (*(var) = (val))
-#define ATOMIC_GET(var, lck) var
-#define ATOMIC_SET(var, val, lck) _InterlockedExchange(&var, (int) (val))
-#define ATOMIC_ADD(var, val, lck) _InterlockedExchangeAdd(&var, (int)
(val))
-#define ATOMIC_SUB(var, val, lck) _InterlockedExchangeAdd(&var, -(int)
(val))
-#define ATOMIC_INC(var, lck) _InterlockedIncrement(&var)
-#define ATOMIC_DEC(var, lck) _InterlockedDecrement(&var)
+#define ATOMIC_GET(var) (*(var))
+/* should we use _InterlockedExchangeAdd(var, 0) instead? */
+#define ATOMIC_SET(var, val) _InterlockedExchange(var, (int) (val))
+#define ATOMIC_ADD(var, val) _InterlockedExchangeAdd(var, (int) (val))
+#define ATOMIC_SUB(var, val) _InterlockedExchangeAdd(var, -(int) (val))
+#define ATOMIC_INC(var) _InterlockedIncrement(var)
+#define ATOMIC_DEC(var) _InterlockedDecrement(var)
#pragma intrinsic(_InterlockedExchange)
#pragma intrinsic(_InterlockedExchangeAdd)
@@ -145,161 +143,146 @@
#endif
-#define ATOMIC_INIT(lck) ((void) 0)
-
-#define ATOMIC_FLAG int
-#define ATOMIC_FLAG_INIT { 0 }
-#define ATOMIC_CLEAR(var, lck) _InterlockedExchange(&var, 0)
-#define ATOMIC_TAS(var, lck) _InterlockedCompareExchange(&var, 1, 0)
+#define ATOMIC_FLAG int
+#define ATOMIC_FLAG_INIT { 0 }
+#define ATOMIC_CLEAR(var) _InterlockedExchange(var, 0)
+#define ATOMIC_TAS(var) _InterlockedCompareExchange(var, 1, 0)
#pragma intrinsic(_InterlockedCompareExchange)
#elif (defined(__GNUC__) || defined(__INTEL_COMPILER)) && !(defined(__sun__)
&& SIZEOF_SIZE_T == 8) && !defined(_MSC_VER) && !defined(NO_ATOMIC_INSTRUCTIONS)
-#if SIZEOF_SSIZE_T == 8
-#define ATOMIC_TYPE int64_t
-#else
-#define ATOMIC_TYPE int
-#endif
-#define ATOMIC_VAR_INIT(val) (val)
+/* the new way of doing this according to GCC (the old way, using
+ * __sync_* primitives is not supported) */
-#ifdef __ATOMIC_SEQ_CST
-
-/* the new way of doing this according to GCC */
-#define ATOMIC_GET(var, lck) __atomic_load_n(&var, __ATOMIC_SEQ_CST)
-#define ATOMIC_SET(var, val, lck) __atomic_store_n(&var, (ATOMIC_TYPE)
(val), __ATOMIC_SEQ_CST)
-#define ATOMIC_ADD(var, val, lck) __atomic_fetch_add(&var, (ATOMIC_TYPE)
(val), __ATOMIC_SEQ_CST)
-#define ATOMIC_SUB(var, val, lck) __atomic_fetch_sub(&var, (ATOMIC_TYPE)
(val), __ATOMIC_SEQ_CST)
-#define ATOMIC_INC(var, lck) __atomic_add_fetch(&var, 1,
__ATOMIC_SEQ_CST)
-#define ATOMIC_DEC(var, lck) __atomic_sub_fetch(&var, 1,
__ATOMIC_SEQ_CST)
+#if SIZEOF_SSIZE_T == 8
+#define ATOMIC_TYPE int64_t
+#else
+#define ATOMIC_TYPE int
+#endif
+#define ATOMIC_VAR_INIT(val) (val)
+#define ATOMIC_INIT(var, val) (*(var) = (val))
-#define ATOMIC_FLAG char
-#define ATOMIC_FLAG_INIT { 0 }
-#define ATOMIC_CLEAR(var, lck) __atomic_clear(&var, __ATOMIC_SEQ_CST)
-#define ATOMIC_TAS(var, lck) __atomic_test_and_set(&var,
__ATOMIC_SEQ_CST)
-
-#else
+#define ATOMIC_GET(var) __atomic_load_n(var, __ATOMIC_SEQ_CST)
+#define ATOMIC_SET(var, val) __atomic_store_n(var, (ATOMIC_TYPE) (val),
__ATOMIC_SEQ_CST)
+#define ATOMIC_ADD(var, val) __atomic_fetch_add(var, (ATOMIC_TYPE) (val),
__ATOMIC_SEQ_CST)
+#define ATOMIC_SUB(var, val) __atomic_fetch_sub(var, (ATOMIC_TYPE) (val),
__ATOMIC_SEQ_CST)
+#define ATOMIC_INC(var) __atomic_add_fetch(var, 1,
__ATOMIC_SEQ_CST)
+#define ATOMIC_DEC(var) __atomic_sub_fetch(var, 1,
__ATOMIC_SEQ_CST)
-/* the old way of doing this, (still?) needed for Intel compiler on Linux */
-#define ATOMIC_GET(var, lck) var
-#define ATOMIC_SET(var, val, lck) (var = (ATOMIC_TYPE) (val))
-#define ATOMIC_ADD(var, val, lck) __sync_fetch_and_add(&var,
(ATOMIC_TYPE) (val))
-#define ATOMIC_SUB(var, val, lck) __sync_fetch_and_sub(&var,
(ATOMIC_TYPE) (val))
-#define ATOMIC_INC(var, lck) __sync_add_and_fetch(&var, 1)
-#define ATOMIC_DEC(var, lck) __sync_sub_and_fetch(&var, 1)
-
-#define ATOMIC_FLAG int
-#define ATOMIC_FLAG_INIT { 0 }
-#define ATOMIC_CLEAR(var, lck) __sync_lock_release(&var)
-#define ATOMIC_TAS(var, lck) __sync_lock_test_and_set(&var, 1)
-
-#endif
-
-#define ATOMIC_INIT(lck) ((void) 0)
+#define ATOMIC_FLAG char
+#define ATOMIC_FLAG_INIT { 0 }
+#define ATOMIC_CLEAR(var) __atomic_clear(var, __ATOMIC_SEQ_CST)
+#define ATOMIC_TAS(var) __atomic_test_and_set(var,
__ATOMIC_SEQ_CST)
#else
-#if SIZEOF_SSIZE_T == 8
-#define ATOMIC_TYPE int64_t
-#else
-#define ATOMIC_TYPE int
-#endif
-#define ATOMIC_VAR_INIT(val) (val)
+/* emulate using mutexes */
+
+typedef struct {
+ size_t val;
+ pthread_mutex_t lck;
+} ATOMIC_TYPE;
+#define ATOMIC_VAR_INIT(v) { .val = (v), .lck = PTHREAD_MUTEX_INITIALIZER }
-static inline ATOMIC_TYPE
-__ATOMIC_GET(volatile ATOMIC_TYPE *var, pthread_mutex_t *lck)
+static inline void
+ATOMIC_INIT(ATOMIC_TYPE *var, size_t val)
{
- ATOMIC_TYPE old;
- pthread_mutex_lock(lck);
- old = *var;
- pthread_mutex_unlock(lck);
+ pthread_mutex_init(&var->lck, 0);
+ var->val = val;
+}
+#define ATOMIC_INIT(var, val) ATOMIC_INIT((var), (size_t) (val))
+
+static inline size_t
+ATOMIC_GET(ATOMIC_TYPE *var)
+{
+ size_t old;
+ pthread_mutex_lock(&var->lck);
+ old = var->val;
+ pthread_mutex_unlock(&var->lck);
return old;
}
-#define ATOMIC_GET(var, lck) __ATOMIC_GET(&var, &(lck).lock)
-static inline ATOMIC_TYPE
-__ATOMIC_SET(volatile ATOMIC_TYPE *var, ATOMIC_TYPE val, pthread_mutex_t *lck)
+static inline size_t
+ATOMIC_SET(ATOMIC_TYPE *var, size_t val)
{
- ATOMIC_TYPE new;
- pthread_mutex_lock(lck);
- *var = val;
- new = *var;
- pthread_mutex_unlock(lck);
+ size_t new;
+ pthread_mutex_lock(&var->lck);
+ new = var->val = val;
+ pthread_mutex_unlock(&var->lck);
return new;
}
-#define ATOMIC_SET(var, val, lck) __ATOMIC_SET(&var, (ATOMIC_TYPE) (val),
&(lck).lock)
+#define ATOMIC_SET(var, val) ATOMIC_SET(var, (size_t) (val))
-static inline ATOMIC_TYPE
-__ATOMIC_ADD(volatile ATOMIC_TYPE *var, ATOMIC_TYPE val, pthread_mutex_t *lck)
+static inline size_t
+ATOMIC_ADD(ATOMIC_TYPE *var, size_t val)
{
- ATOMIC_TYPE old;
- pthread_mutex_lock(lck);
- old = *var;
- *var += val;
- pthread_mutex_unlock(lck);
+ size_t old;
+ pthread_mutex_lock(&var->lck);
_______________________________________________
checkin-list mailing list
[email protected]
https://www.monetdb.org/mailman/listinfo/checkin-list