From: Sergey Fedorov <serge.f...@gmail.com> It is naturally expected that some memory ordering should be provided around qht_insert(), qht_remove(), and qht_lookup(). Document these assumptions in the header file and put some comments in the source to denote how those memory ordering requirements are fulfilled.
Signed-off-by: Sergey Fedorov <serge.f...@gmail.com> Signed-off-by: Sergey Fedorov <sergey.fedo...@linaro.org> --- include/qemu/qht.h | 9 +++++++++ util/qht.c | 8 ++++++++ 2 files changed, 17 insertions(+) diff --git a/include/qemu/qht.h b/include/qemu/qht.h index 70bfc68b8d67..5f633e5d8100 100644 --- a/include/qemu/qht.h +++ b/include/qemu/qht.h @@ -69,6 +69,9 @@ void qht_destroy(struct qht *ht); * Attempting to insert a NULL @p is a bug. * Inserting the same pointer @p with different @hash values is a bug. * + * In case of successful operation, smp_wmb() is implied before the pointer is + * inserted into the hash table. + * * Returns true on sucess. * Returns false if the @p-@hash pair already exists in the hash table. */ @@ -86,6 +89,9 @@ bool qht_insert(struct qht *ht, void *p, uint32_t hash); * The user-provided @func compares pointers in QHT against @userp. * If the function returns true, a match has been found. * + * smp_rmb() is implied before and after the pointer is looked up and retrieved + * from the hash table. + * * Returns the corresponding pointer when a match is found. * Returns NULL otherwise. */ @@ -105,6 +111,9 @@ void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp, * This guarantees that concurrent lookups will always compare against valid * data. * + * In case of successful operation, smp_wmb() is implied after the pointer is + * removed from the hash table. + * * Returns true on success. * Returns false if the @p-@hash pair was not found. 
*/ diff --git a/util/qht.c b/util/qht.c index 40d6e218f759..0469d068b4de 100644 --- a/util/qht.c +++ b/util/qht.c @@ -466,8 +466,10 @@ void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func, void *ret; do { + /* seqlock_read_begin() also serves as the guarantee of smp_rmb() */ version = seqlock_read_begin(&b->sequence); ret = qht_do_lookup(b, func, userp, hash); + /* seqlock_read_retry() also serves as the guarantee of smp_rmb() */ } while (seqlock_read_retry(&b->sequence, version)); return ret; } @@ -483,8 +485,10 @@ void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp, map = atomic_rcu_read(&ht->map); b = qht_map_to_bucket(map, hash); + /* seqlock_read_begin() also serves as the guarantee of smp_rmb() */ version = seqlock_read_begin(&b->sequence); ret = qht_do_lookup(b, func, userp, hash); + /* seqlock_read_retry() also serves as the guarantee of smp_rmb() */ if (likely(!seqlock_read_retry(&b->sequence, version))) { return ret; } @@ -530,6 +534,7 @@ static bool qht_insert__locked(struct qht *ht, struct qht_map *map, found: /* found an empty key: acquire the seqlock and write */ + /* seqlock_write_begin() also serves as the guarantee of smp_wmb() */ seqlock_write_begin(&head->sequence); if (new) { atomic_rcu_set(&prev->next, b); @@ -661,6 +666,9 @@ bool qht_remove__locked(struct qht_map *map, struct qht_bucket *head, qht_debug_assert(b->hashes[i] == hash); seqlock_write_begin(&head->sequence); qht_bucket_remove_entry(b, i); + /* seqlock_write_end() also serves as the guarantee of + * smp_wmb() + */ seqlock_write_end(&head->sequence); return true; } -- 1.9.1