On 14/05/16 06:34, Emilio G. Cota wrote:
> +/*
> + * Get a head bucket and lock it, making sure its parent map is not stale.
> + * @pmap is filled with a pointer to the bucket's parent map.
> + *
> + * Unlock with qemu_spin_unlock(&b->lock).
> + */
> +static inline
> +struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
> +                                             struct qht_map **pmap)
> +{
> +    struct qht_bucket *b;
> +    struct qht_map *map;
> +
> +    for (;;) {
> +        map = qht_map__atomic_mb(ht);
> +        b = qht_map_to_bucket(map, hash);
> +
> +        qemu_spin_lock(&b->lock);
> +        if (likely(!map->stale)) {
> +            *pmap = map;
> +            return b;
> +        }
> +        qemu_spin_unlock(&b->lock);
> +
> +        /* resize in progress; wait until it completes */
> +        while (qemu_spin_locked(&ht->lock)) {
> +            cpu_relax();
> +        }
> +    }
> +}
What if we turn qht::lock into a mutex and change the function as follows:

static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    qemu_spin_lock(&b->lock);
    /* 'ht->map' access is serialized by 'b->lock' here */
    if (likely(map == ht->map)) {
        /* no resize in progress; we're done */
        *pmap = map;
        return b;
    }
    qemu_spin_unlock(&b->lock);

    /* resize in progress; retry grabbing 'ht->lock' */
    qemu_mutex_lock(&ht->lock);
    b = qht_map_to_bucket(ht->map, hash);
    *pmap = ht->map;
    qemu_spin_lock(&b->lock);
    qemu_mutex_unlock(&ht->lock);

    return b;
}

With this implementation we could:
(1) get rid of qht_map::stale
(2) avoid wasting cycles waiting for a resize to complete

Kind regards,
Sergey
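
P.S. For clarity, here is a rough sketch (untested; the qht_map_lock_buckets/
qht_map_copy helpers and the call_rcu reclamation are assumed, not existing
code) of what the resize side could look like under this scheme. The key
point is that the new map is published only while every bucket lock of the
old map is held, which is what makes the 'map == ht->map' check above
sufficient:

static void qht_do_resize(struct qht *ht, struct qht_map *new)
{
    struct qht_map *old = ht->map;

    /* called with ht->lock (the mutex) held */
    qht_map_lock_buckets(old);   /* assumed helper: grab all head bucket locks */

    qht_map_copy(old, new);      /* assumed helper: rehash entries into 'new' */

    /*
     * Publish while still holding all of old's bucket locks. A reader
     * that holds one of those locks and sees map == ht->map therefore
     * knows the swap cannot happen under its feet; a reader that sees
     * a mismatch falls back to ht->lock and waits for us to finish.
     */
    atomic_rcu_set(&ht->map, new);

    qht_map_unlock_buckets(old);
    /* reclaim 'old' after a grace period, e.g. via call_rcu() */
}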