Hi David,

Thank you for the patch!
Some comments inlined.

> On May 22, 2019, at 10:06 AM, David Marchand <david.march...@redhat.com> wrote:
>
> Prefer the existing APIs rather than directly accessing the
> configuration structure.
>
> test_hash_multi_add_lookup() currently starts n readers and N writers
> using rte_eal_remote_launch().
> It then waits for the N writers to complete with a custom
> multi_writer_done[] array to synchronise over.
> Take this opportunity to use rte_eal_wait_lcore() so that the code is
> more straightforward:
> - we start n readers with rte_eal_remote_launch(),
> - we start N writers with rte_eal_remote_launch(),
> - we wait for the N writers to join with rte_eal_wait_lcore(),
> - we wait for the n readers to join with rte_eal_wait_lcore().
>
> Fixes: c7eb0972e74b ("test/hash: add lock-free r/w concurrency")
> Fixes: 3f9aab961ed3 ("test/hash: check lock-free extendable bucket")
> Cc: sta...@dpdk.org
>
> Signed-off-by: David Marchand <david.march...@redhat.com>
> ---
>  app/test/test_hash_readwrite_lf.c | 41 ++++++++++++++++++---------------------
>  1 file changed, 19 insertions(+), 22 deletions(-)
>
> ---
> Changelog since v1:
> - fixed test hang in test_hash_multi_add_lookup() reported by Wang, Yipeng
>
> diff --git a/app/test/test_hash_readwrite_lf.c b/app/test/test_hash_readwrite_lf.c
> index 4ab4c8e..343a338 100644
> --- a/app/test/test_hash_readwrite_lf.c
> +++ b/app/test/test_hash_readwrite_lf.c
> @@ -86,7 +86,6 @@ struct {
>  static rte_atomic64_t greads;
>
>  static volatile uint8_t writer_done;
> -static volatile uint8_t multi_writer_done[4];
>
>  uint16_t enabled_core_ids[RTE_MAX_LCORE];
>
> @@ -690,7 +689,6 @@ struct {
>  	for (i = offset; i < offset + tbl_rwc_test_param.single_insert; i++)
>  		rte_hash_add_key(tbl_rwc_test_param.h,
>  				 tbl_rwc_test_param.keys_ks + i);
> -	multi_writer_done[pos_core] = 1;
>  	return 0;
>  }
>
> @@ -738,10 +736,9 @@ struct {
>  		rte_eal_remote_launch(test_rwc_reader,
>  				(void *)(uintptr_t)read_type,
>  				enabled_core_ids[i]);
> -	rte_eal_mp_wait_lcore();
>
>  	for (i = 1; i <= rwc_core_cnt[n]; i++)
> -		if (lcore_config[i].ret < 0)
> +		if (rte_eal_wait_lcore(i) < 0)

This should be:

	if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)

since the readers are launched on enabled_core_ids[i], not on lcore i.
(There are similar changes needed in the other functions too. I realize
this is a separate issue from what the patch is aimed at. If you see fit,
please integrate it; otherwise I will put out a patch once yours has been
merged.)
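To make the pairing concrete, here is a minimal sketch of what I mean
(dummy_reader() and launch_and_join() are made-up names, for illustration
only): the id passed to rte_eal_wait_lcore() has to be the same remapped
id that was passed to rte_eal_remote_launch(), because the enabled lcores
are not necessarily numbered contiguously from 1.

	#include <stdint.h>
	#include <rte_launch.h>
	#include <rte_lcore.h>

	/* The test's remapping of enabled lcore ids. */
	extern uint16_t enabled_core_ids[RTE_MAX_LCORE];

	static int
	dummy_reader(void *arg)
	{
		(void)arg;
		return 0;
	}

	static int
	launch_and_join(uint32_t core_cnt)
	{
		uint32_t i;

		/* Launch on the remapped lcore ids... */
		for (i = 1; i <= core_cnt; i++)
			rte_eal_remote_launch(dummy_reader, NULL,
					      enabled_core_ids[i]);

		/* ...so the join must use the same remapped ids; a plain
		 * i only works if the enabled lcores happen to be
		 * exactly 1..core_cnt. */
		for (i = 1; i <= core_cnt; i++)
			if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
				return -1;
		return 0;
	}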
>  			goto err;
>
>  		unsigned long long cycles_per_lookup =
> @@ -758,6 +755,7 @@ struct {
>  	return 0;
>
>  err:
> +	rte_eal_mp_wait_lcore();
>  	rte_hash_free(tbl_rwc_test_param.h);
>  	return -1;
>  }
>
> @@ -808,12 +806,11 @@ struct {
>  				enabled_core_ids[i]);
>  	ret = write_keys(write_type);
>  	writer_done = 1;
> -	rte_eal_mp_wait_lcore();
>
>  	if (ret < 0)
>  		goto err;
>  	for (i = 1; i <= rwc_core_cnt[n]; i++)
> -		if (lcore_config[i].ret < 0)
> +		if (rte_eal_wait_lcore(i) < 0)

Same here:

	if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)

>  			goto err;
>
>  		unsigned long long cycles_per_lookup =
> @@ -830,6 +827,7 @@ struct {
>  	return 0;
>
>  err:
> +	rte_eal_mp_wait_lcore();
>  	rte_hash_free(tbl_rwc_test_param.h);
>  	return -1;
>  }
>
> @@ -884,12 +882,11 @@ struct {
>  	write_type = WRITE_KEY_SHIFT;
>  	ret = write_keys(write_type);
>  	writer_done = 1;
> -	rte_eal_mp_wait_lcore();
>
>  	if (ret < 0)
>  		goto err;
>  	for (i = 1; i <= rwc_core_cnt[n]; i++)
> -		if (lcore_config[i].ret < 0)
> +		if (rte_eal_wait_lcore(i) < 0)

Same here:

	if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)

>  			goto err;
>
>  		unsigned long long cycles_per_lookup =
> @@ -906,6 +903,7 @@ struct {
>  	return 0;
>
>  err:
> +	rte_eal_mp_wait_lcore();
>  	rte_hash_free(tbl_rwc_test_param.h);
>  	return -1;
>  }
>
> @@ -960,12 +958,11 @@ struct {
>  	write_type = WRITE_KEY_SHIFT;
>  	ret = write_keys(write_type);
>  	writer_done = 1;
> -	rte_eal_mp_wait_lcore();
>
>  	if (ret < 0)
>  		goto err;
>  	for (i = 1; i <= rwc_core_cnt[n]; i++)
> -		if (lcore_config[i].ret < 0)
> +		if (rte_eal_wait_lcore(i) < 0)

Same here:

	if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)

>  			goto err;
>
>  		unsigned long long cycles_per_lookup =
> @@ -982,6 +979,7 @@ struct {
>  	return 0;
>
>  err:
> +	rte_eal_mp_wait_lcore();
>  	rte_hash_free(tbl_rwc_test_param.h);
>  	return -1;
>  }
>
> @@ -1035,12 +1033,11 @@ struct {
>  	write_type = WRITE_KEY_SHIFT;
>  	ret = write_keys(write_type);
>  	writer_done = 1;
> -	rte_eal_mp_wait_lcore();
>
>  	if (ret < 0)
>  		goto err;
>  	for (i = 1; i <= rwc_core_cnt[n]; i++)
> -		if (lcore_config[i].ret < 0)
> +		if (rte_eal_wait_lcore(i) < 0)

Same here:

	if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)

>  			goto err;
>
>  		unsigned long long cycles_per_lookup =
> @@ -1056,6 +1053,7 @@ struct {
>  	return 0;
>
>  err:
> +	rte_eal_mp_wait_lcore();
>  	rte_hash_free(tbl_rwc_test_param.h);
>  	return -1;
>  }
>
> @@ -1108,8 +1106,6 @@ struct {
>
>  	rte_hash_reset(tbl_rwc_test_param.h);
>  	writer_done = 0;
> -	for (i = 0; i < 4; i++)
> -		multi_writer_done[i] = 0;
>  	write_type = WRITE_NO_KEY_SHIFT;
>  	if (write_keys(write_type) < 0)
>  		goto err;
> @@ -1133,15 +1129,15 @@ struct {
>  	}
>
>  	/* Wait for writers to complete */
> -	for (i = 0; i < rwc_core_cnt[m]; i++)
> -		while
> -			(multi_writer_done[i] == 0);
> -	writer_done = 1;
> +	for (i = rwc_core_cnt[n] + 1;
> +	     i <= rwc_core_cnt[m] + rwc_core_cnt[n];
> +	     i++)
> +		rte_eal_wait_lcore(i);

Same remapping issue here:

	rte_eal_wait_lcore(enabled_core_ids[i]);

>
> -	rte_eal_mp_wait_lcore();
> +	writer_done = 1;
>
>  	for (i = 1; i <= rwc_core_cnt[n]; i++)
> -		if (lcore_config[i].ret < 0)
> +		if (rte_eal_wait_lcore(i) < 0)

Same here:

	if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)

>  			goto err;
>
>  		unsigned long long cycles_per_lookup =
> @@ -1160,6 +1156,7 @@ struct {
>  	return 0;
>
>  err:
> +	rte_eal_mp_wait_lcore();
>  	rte_hash_free(tbl_rwc_test_param.h);
>  	return -1;
>  }
>
> @@ -1222,10 +1219,9 @@ struct {
>  		}
>  	}
>  	writer_done = 1;
> -	rte_eal_mp_wait_lcore();
>
>  	for (i = 1; i <= rwc_core_cnt[n]; i++)
> -		if (lcore_config[i].ret < 0)
> +		if (rte_eal_wait_lcore(i) < 0)

Same here:

	if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)

>  			goto err;
>
>  		unsigned long long cycles_per_lookup =
> @@ -1242,6 +1238,7 @@ struct {
>  	return 0;
>
>  err:
> +	rte_eal_mp_wait_lcore();
>  	rte_hash_free(tbl_rwc_test_param.h);
>  	return -1;
>  }
> --
> 1.8.3.1

Thank you,
Dharmik Thakkar
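P.S. For reference, here is a rough sketch of the launch/join sequence the
patch moves test_hash_multi_add_lookup() to, with the enabled_core_ids[]
remapping I suggest above folded in. The dummy_* helpers and the parameter
names are made up for illustration; only the rte_eal_remote_launch()/
rte_eal_wait_lcore() pairing mirrors the patch.

	#include <stdint.h>
	#include <rte_launch.h>
	#include <rte_lcore.h>

	extern uint16_t enabled_core_ids[RTE_MAX_LCORE];
	static volatile uint8_t writer_done;

	static int dummy_reader(void *arg) { (void)arg; return 0; }
	static int dummy_writer(void *arg) { (void)arg; return 0; }

	static int
	mixed_launch(uint32_t n_readers, uint32_t n_writers)
	{
		uint32_t i;
		int ret = 0;

		writer_done = 0;
		/* 1) start the readers */
		for (i = 1; i <= n_readers; i++)
			rte_eal_remote_launch(dummy_reader, NULL,
					      enabled_core_ids[i]);
		/* 2) start the writers on the next enabled cores */
		for (i = n_readers + 1; i <= n_readers + n_writers; i++)
			rte_eal_remote_launch(dummy_writer, NULL,
					      enabled_core_ids[i]);
		/* 3) join the writers, then release the readers */
		for (i = n_readers + 1; i <= n_readers + n_writers; i++)
			rte_eal_wait_lcore(enabled_core_ids[i]);
		writer_done = 1;
		/* 4) join the readers and collect their status */
		for (i = 1; i <= n_readers; i++)
			if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
				ret = -1;
		return ret;
	}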