This matches the semantics of liburcu.

Signed-off-by: Emilio G. Cota <c...@braap.org>
---
 docs/rcu.txt          | 33 +++++++++++++++------------------
 include/qemu/atomic.h | 35 ++++++++++++++++++-----------------
 memory.c              |  6 +++---
 tests/rcutorture.c    |  4 ++--
 4 files changed, 38 insertions(+), 40 deletions(-)
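The practical difference for callers: the old QEMU macros take a _pointer_ to
the RCU-protected variable, whereas the liburcu-style names take the variable
itself. A minimal sketch of a converted call site (struct foo, global_foo and
reader_writer_example are made-up names for illustration, not code from this
patch):

    #include "qemu/atomic.h"   /* rcu_dereference()/rcu_assign_pointer() after this patch */
    #include "qemu/rcu.h"      /* rcu_read_lock()/rcu_read_unlock() */

    struct foo { int a; };
    static struct foo *global_foo;   /* hypothetical RCU-protected pointer */

    static void reader_writer_example(struct foo *new_foo)
    {
        /* Before: p = atomic_rcu_read(&global_foo);
         *         atomic_rcu_set(&global_foo, new_foo);
         */

        /* After: pass the variable itself, as liburcu does. */
        rcu_read_lock();
        struct foo *p = rcu_dereference(global_foo);
        (void)p;                     /* read-side use of p goes here */
        rcu_read_unlock();

        rcu_assign_pointer(global_foo, new_foo);
    }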
diff --git a/docs/rcu.txt b/docs/rcu.txt
index 575f563..154b18a 100644
--- a/docs/rcu.txt
+++ b/docs/rcu.txt
@@ -127,13 +127,13 @@ The core RCU API is small:
 
             call_rcu_first_elem(foo_reclaim, g_free, rcu);
 
-     typeof(*p) atomic_rcu_read(p);
+     typeof(p) rcu_dereference(p);
 
-        atomic_rcu_read() is similar to atomic_mb_read(), but it makes
+        rcu_dereference() is similar to atomic_mb_read(), but it makes
         some assumptions on the code that calls it. This allows a more
         optimized implementation.
 
-        atomic_rcu_read assumes that whenever a single RCU critical
+        rcu_dereference assumes that whenever a single RCU critical
         section reads multiple shared data, these reads are either
         data-dependent or need no ordering. This is almost always the
         case when using RCU, because read-side critical sections typically
@@ -141,7 +141,7 @@ The core RCU API is small:
         every update) until reaching a data structure of interest,
         and then read from there.
 
-        RCU read-side critical sections must use atomic_rcu_read() to
+        RCU read-side critical sections must use rcu_dereference() to
         read data, unless concurrent writes are presented by another
         synchronization mechanism.
 
@@ -149,18 +149,18 @@ The core RCU API is small:
         data structure in a single direction, opposite to the direction
         in which the updater initializes it.
 
-     void atomic_rcu_set(p, typeof(*p) v);
+     void rcu_assign_pointer(p, typeof(p) v);
 
-        atomic_rcu_set() is also similar to atomic_mb_set(), and it also
+        rcu_assign_pointer() is also similar to atomic_mb_set(), and it also
         makes assumptions on the code that calls it in order to allow a more
         optimized implementation.
 
-        In particular, atomic_rcu_set() suffices for synchronization
+        In particular, rcu_assign_pointer() suffices for synchronization
         with readers, if the updater never mutates a field within a
         data item that is already accessible to readers. This is the
         case when initializing a new copy of the RCU-protected data
         structure; just ensure that initialization of *p is carried out
-        before atomic_rcu_set() makes the data item visible to readers.
+        before rcu_assign_pointer() makes the data item visible to readers.
         If this rule is observed, writes will happen in the opposite
         order as reads in the RCU read-side critical sections (or if
         there is just one update), and there will be no need for other
@@ -193,9 +193,6 @@ DIFFERENCES WITH LINUX
   programming; not allowing this would prevent upgrading an RCU
   read-side critical section to become an updater.
 
-- atomic_rcu_read and atomic_rcu_set replace rcu_dereference and
-  rcu_assign_pointer. They take a _pointer_ to the variable being accessed.
-
 - call_rcu_first_elem is a macro that has an extra argument (the name of the
   first field in the struct, which must be a struct rcu_head), and expects the
   type of the callback's argument to be the type of the first argument.
@@ -237,7 +234,7 @@ may be used as a restricted reference-counting mechanism. For example,
 consider the following code fragment:
 
     rcu_read_lock();
-    p = atomic_rcu_read(&foo);
+    p = rcu_dereference(foo);
     /* do something with p. */
     rcu_read_unlock();
 
@@ -248,7 +245,7 @@ The write side looks simply like this (with appropriate locking):
 
     qemu_mutex_lock(&foo_mutex);
     old = foo;
-    atomic_rcu_set(&foo, new);
+    rcu_assign_pointer(foo, new);
     qemu_mutex_unlock(&foo_mutex);
     synchronize_rcu();
     free(old);
@@ -257,7 +254,7 @@ If the processing cannot be done purely within the critical section, it
 is possible to combine this idiom with a "real" reference count:
 
     rcu_read_lock();
-    p = atomic_rcu_read(&foo);
+    p = rcu_dereference(foo);
     foo_ref(p);
     rcu_read_unlock();
     /* do something with p. */
@@ -267,7 +264,7 @@ The write side can be like this:
 
     qemu_mutex_lock(&foo_mutex);
     old = foo;
-    atomic_rcu_set(&foo, new);
+    rcu_assign_pointer(foo, new);
     qemu_mutex_unlock(&foo_mutex);
     synchronize_rcu();
     foo_unref(old);
@@ -276,7 +273,7 @@ or with call_rcu:
 
     qemu_mutex_lock(&foo_mutex);
     old = foo;
-    atomic_rcu_set(&foo, new);
+    rcu_assign_pointer(foo, new);
     qemu_mutex_unlock(&foo_mutex);
     call_rcu(foo_unref, old, rcu);
 
@@ -355,7 +352,7 @@ Instead, we store the size of the array with the array itself:
 
     read side:
         rcu_read_lock();
-        struct arr *array = atomic_rcu_read(&global_array);
+        struct arr *array = rcu_dereference(global_array);
         x = i < array->size ? array->data[i] : -1;
         rcu_read_unlock();
         return x;
@@ -372,7 +369,7 @@ Instead, we store the size of the array with the array itself:
 
         /* Removal phase. */
         old_array = global_array;
-        atomic_rcu_set(&new_array->data, new_array);
+        rcu_assign_pointer(new_array->data, new_array);
         synchronize_rcu();
 
         /* Reclamation phase. */
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 98e05ca..4795e28 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -130,7 +130,7 @@
 #endif
 
 /**
- * atomic_rcu_read - reads a RCU-protected pointer to a local variable
+ * rcu_dereference - reads a RCU-protected pointer to a local variable
  * into a RCU read-side critical section. The pointer can later be safely
  * dereferenced within the critical section.
  *
@@ -140,25 +140,26 @@
  * Inserts memory barriers on architectures that require them (currently only
  * Alpha) and documents which pointers are protected by RCU.
  *
- * Unless the __ATOMIC_CONSUME memory order is available, atomic_rcu_read also
+ * Unless the __ATOMIC_CONSUME memory order is available, rcu_dereference also
  * includes a compiler barrier to ensure that value-speculative optimizations
  * (e.g. VSS: Value Speculation Scheduling) does not perform the data read
  * before the pointer read by speculating the value of the pointer. On new
  * enough compilers, atomic_load takes care of such concern about
  * dependency-breaking optimizations.
  *
- * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
+ * Should match rcu_assign_pointer(), atomic_xchg(), atomic_cmpxchg().
  */
-#ifndef atomic_rcu_read
+#ifndef rcu_dereference
+
 #ifdef __ATOMIC_CONSUME
-#define atomic_rcu_read(ptr) ({                      \
-    typeof(*ptr) _val;                               \
-    __atomic_load(ptr, &_val, __ATOMIC_CONSUME);     \
+#define rcu_dereference(ptr) ({                      \
+    typeof(ptr) _val;                                \
+    __atomic_load(&ptr, &_val, __ATOMIC_CONSUME);\
     _val;                                            \
 })
 #else
-#define atomic_rcu_read(ptr) ({                      \
-    typeof(*ptr) _val = atomic_read(ptr);            \
+#define rcu_dereference(ptr) ({                      \
+    typeof(ptr) _val = atomic_read(&ptr);            \
     smp_read_barrier_depends();                      \
     _val;                                            \
 })
@@ -166,7 +167,7 @@
 #endif
 
 /**
- * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
+ * rcu_assign_pointer - assigns (publicizes) a pointer to a new data structure
  * meant to be read by RCU read-side critical sections.
  *
  * Documents which pointers will be dereferenced by RCU read-side critical
@@ -174,18 +175,18 @@
  * them. It also makes sure the compiler does not reorder code initializing the
  * data structure before its publication.
  *
- * Should match atomic_rcu_read().
+ * Should match rcu_dereference().
  */
-#ifndef atomic_rcu_set
+#ifndef rcu_assign_pointer
 #ifdef __ATOMIC_RELEASE
-#define atomic_rcu_set(ptr, i) do {                  \
-    typeof(*ptr) _val = (i);                         \
-    __atomic_store(ptr, &_val, __ATOMIC_RELEASE);    \
+#define rcu_assign_pointer(ptr, i) do {              \
+    typeof(ptr) _val = (i);                          \
+    __atomic_store(&ptr, &_val, __ATOMIC_RELEASE);\
 } while(0)
 #else
-#define atomic_rcu_set(ptr, i) do {                  \
+#define rcu_assign_pointer(ptr, i) do {              \
     smp_wmb();                                       \
-    atomic_set(ptr, i);                              \
+    atomic_set(&ptr, i);                             \
 } while (0)
 #endif
 #endif
diff --git a/memory.c b/memory.c
index dc5e4e9..b950259 100644
--- a/memory.c
+++ b/memory.c
@@ -642,7 +642,7 @@ static FlatView *address_space_get_flatview(AddressSpace *as)
     FlatView *view;
 
     rcu_read_lock();
-    view = atomic_rcu_read(&as->current_map);
+    view = rcu_dereference(as->current_map);
     flatview_ref(view);
     rcu_read_unlock();
     return view;
@@ -754,7 +754,7 @@ static void address_space_update_topology(AddressSpace *as)
     address_space_update_topology_pass(as, old_view, new_view, true);
 
     /* Writes are protected by the BQL. */
-    atomic_rcu_set(&as->current_map, new_view);
+    rcu_assign_pointer(as->current_map, new_view);
     call_rcu_first_elem(old_view, flatview_unref, rcu);
 
     /* Note that all the old MemoryRegions are still alive up to this
@@ -1829,7 +1829,7 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
     range = addrrange_make(int128_make64(addr), int128_make64(size));
 
     rcu_read_lock();
-    view = atomic_rcu_read(&as->current_map);
+    view = rcu_dereference(as->current_map);
     fr = flatview_lookup(view, range);
     if (!fr) {
         goto out;
diff --git a/tests/rcutorture.c b/tests/rcutorture.c
index 60a2ccf..2c10107 100644
--- a/tests/rcutorture.c
+++ b/tests/rcutorture.c
@@ -251,7 +251,7 @@ static void *rcu_read_stress_test(void *arg)
     }
     while (goflag == GOFLAG_RUN) {
         rcu_read_lock();
-        p = atomic_rcu_read(&rcu_stress_current);
+        p = rcu_dereference(rcu_stress_current);
         if (p->mbtest == 0) {
             n_mberror++;
         }
@@ -298,7 +298,7 @@ static void *rcu_update_stress_test(void *arg)
         smp_mb();
         p->pipe_count = 0;
         p->mbtest = 1;
-        atomic_rcu_set(&rcu_stress_current, p);
+        rcu_assign_pointer(rcu_stress_current, p);
         rcu_stress_idx = i;
         for (i = 0; i < RCU_STRESS_PIPE_LEN; i++) {
             if (i != rcu_stress_idx) {
-- 
1.8.3
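For context, the read/publish pairing that docs/rcu.txt describes looks like
this with the new names. This is only an illustrative sketch; struct item,
current_item, reader() and publish() are invented stand-ins rather than code
from the patch:

    #include "qemu/atomic.h"
    #include "qemu/rcu.h"

    struct item { int mbtest; };
    static struct item *current_item;   /* hypothetical RCU-protected pointer */

    /* Reader: load the pointer inside a read-side critical section. */
    static int reader(void)
    {
        int val;

        rcu_read_lock();
        val = rcu_dereference(current_item)->mbtest;
        rcu_read_unlock();
        return val;
    }

    /* Updater: fully initialize the new item, then publish it; the
     * release semantics of rcu_assign_pointer() order the two stores. */
    static void publish(struct item *p)
    {
        p->mbtest = 1;
        rcu_assign_pointer(current_item, p);
    }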