fat_rwlock_tryrdlock() used to call fat_rwlock_get_slot__(), which could block in the "slow path" case (the first acquisition of the lock by a thread, when its per-thread slot must be created). This commit adds fat_rwlock_try_get_slot__(), which does not block even in the slow path, and makes fat_rwlock_tryrdlock() use it, returning EBUSY when the slot cannot be obtained without blocking.
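For context (not part of the patch), here is a minimal sketch of how a caller might use the non-blocking read lock; the helper name try_read_table and the 'table_lock' argument are made up for illustration:

#include <stdbool.h>
#include "fat-rwlock.h"

/* Hypothetical caller: attempt to take 'table_lock' for reading without
 * blocking.  fat_rwlock_tryrdlock() returns 0 on success and EBUSY if
 * taking the lock would block (including, with this patch, when the
 * per-thread slot cannot be created without blocking). */
static bool
try_read_table(const struct fat_rwlock *table_lock)
{
    if (!fat_rwlock_tryrdlock(table_lock)) {
        /* ... read the data protected by 'table_lock' ... */
        fat_rwlock_unlock(table_lock);
        return true;
    }

    /* Lock not taken; the caller can retry later or do other work. */
    return false;
}

The "0 means the lock was taken" convention mirrors the one the patch itself relies on for ovs_mutex_trylock() in the slow path.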
Signed-off-by: Daniele Di Proietto <ddiproie...@vmware.com>
---
 lib/fat-rwlock.c | 33 ++++++++++++++++++++++++++++++++-
 1 file changed, 32 insertions(+), 1 deletion(-)

diff --git a/lib/fat-rwlock.c b/lib/fat-rwlock.c
index 82dfbfe..be53b56 100644
--- a/lib/fat-rwlock.c
+++ b/lib/fat-rwlock.c
@@ -162,6 +162,33 @@ fat_rwlock_rdlock(const struct fat_rwlock *rwlock_)
     }
 }
 
+static struct fat_rwlock_slot *
+fat_rwlock_try_get_slot__(struct fat_rwlock *rwlock)
+{
+    struct fat_rwlock_slot *slot;
+
+    /* Fast path. */
+    slot = ovsthread_getspecific(rwlock->key);
+    if (slot) {
+        return slot;
+    }
+
+    /* Slow path: create a new slot for 'rwlock' in this thread. */
+
+    if (!ovs_mutex_trylock(&rwlock->mutex)) {
+        slot = xmalloc_cacheline(sizeof *slot);
+        slot->rwlock = rwlock;
+        ovs_mutex_init(&slot->mutex);
+        slot->depth = 0;
+
+        list_push_back(&rwlock->threads, &slot->list_node);
+        ovs_mutex_unlock(&rwlock->mutex);
+        ovsthread_setspecific(rwlock->key, slot);
+    }
+
+    return slot;
+}
+
 /* Tries to lock 'rwlock' for reading.  If successful, returns 0.  If taking
  * the lock would require blocking, returns EBUSY (without blocking). */
 int
@@ -170,9 +197,13 @@ fat_rwlock_tryrdlock(const struct fat_rwlock *rwlock_)
     OVS_NO_THREAD_SAFETY_ANALYSIS
 {
     struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
-    struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock);
+    struct fat_rwlock_slot *this = fat_rwlock_try_get_slot__(rwlock);
     int error;
 
+    if (!this) {
+        return EBUSY;
+    }
+
     switch (this->depth) {
     case UINT_MAX:
         return EBUSY;
-- 
2.1.0.rc1