> > @@ -1211,49 +1205,25 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
> >
> >         __hash_rw_reader_lock(h);
> >
> > -       do {
> > -               /* Load the table change counter before the lookup
> > -                * starts. Acquire semantics will make sure that
> > -                * loads in search_one_bucket are not hoisted.
> > -                */
> > -               cnt_b = __atomic_load_n(h->tbl_chng_cnt,
> > -                               __ATOMIC_ACQUIRE);
> > +       /* Check if key is in primary location */
> > +       bkt = &h->buckets[prim_bucket_idx];
> 
> 
> In the original version, this bkt assignment was done before
> __hash_rw_reader_lock(). Moving it inside the lock causes a
> performance regression in the lookup 'hit' case.
> 
> The following change fixes it, i.e., it brings the code back to the
> original ordering:
> 
> [master]83xx1.2[dpdk]# git diff
> diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
> index 7e1a9ac96..bc8a55f0f 100644
> --- a/lib/librte_hash/rte_cuckoo_hash.c
> +++ b/lib/librte_hash/rte_cuckoo_hash.c
> @@ -1204,10 +1204,11 @@ __rte_hash_lookup_with_hash_l(const struct
> rte_hash *h, const void *key,
>         prim_bucket_idx = get_prim_bucket_index(h, sig);
>         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
> 
> -       __hash_rw_reader_lock(h);
> -
>         /* Check if key is in primary location */
>         bkt = &h->buckets[prim_bucket_idx];
> +
> +       __hash_rw_reader_lock(h);
> +
>         ret = search_one_bucket_l(h, key, short_sig, data, bkt);
>         if (ret != -1) {
>                 __hash_rw_reader_unlock(h);
> 
> 
> Could you send the final version that needs to be taken into the
> tree, i.e., with the intermediate review-only commits removed?
> I can then do a final test with that.
Thanks Jerin for testing. I have sent out v2.
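
For the archive, here is a minimal standalone sketch of why the
ordering matters. It models the reader lock with a plain pthread
rwlock and uses simplified types; all names below are illustrative
stand-ins, not the actual rte_cuckoo_hash.c code:

    #include <pthread.h>
    #include <stdint.h>

    /* Simplified stand-ins for DPDK's internal types. */
    struct bucket {
            uint32_t key_idx[8];
    };

    struct hash {
            pthread_rwlock_t readwrite_lock; /* models __hash_rw_reader_lock() */
            uint32_t bucket_mask;            /* read-mostly after init */
            struct bucket *buckets;
    };

    /* Placeholder for search_one_bucket_l(); always misses in this sketch. */
    static int
    search_one_bucket(const struct hash *h, const struct bucket *bkt)
    {
            (void)h; (void)bkt;
            return -1;
    }

    static int
    lookup(struct hash *h, uint32_t sig)
    {
            int ret;

            /* The bucket address depends only on read-mostly fields, so it
             * can be computed before taking the reader lock. Computing it
             * inside the lock (as the reviewed patch did) stretches the
             * critical section and slows the common 'hit' path under
             * reader contention. */
            struct bucket *bkt = &h->buckets[sig & h->bucket_mask];

            pthread_rwlock_rdlock(&h->readwrite_lock);
            ret = search_one_bucket(h, bkt);
            pthread_rwlock_unlock(&h->readwrite_lock);

            return ret;
    }

The point is simply that the address arithmetic has no reason to sit
inside the critical section, so hoisting it above the lock keeps the
locked region as short as possible.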
