Hi Wang,

On 27/06/2019 10:37, Ruifeng Wang wrote:
When a tbl8 group is getting attached to a tbl24 entry, lookup
might fail even though the entry is configured in the table.

For example, consider an LPM table configured with 10.10.10.1/24.
When a new entry 10.10.10.32/28 is being added, a new tbl8
group is allocated and tbl24 entry is changed to point to
the tbl8 group. If the tbl24 entry is written without the tbl8
group entries updated, a lookup on 10.10.10.9 will return
failure.

Correct memory orderings are required to ensure that the
store to tbl24 does not happen before the stores to tbl8 group
entries complete.
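
Roughly, the required pattern looks like this (a simplified sketch using
the patch's __atomic_store() call; index variables are illustrative):

    /* Populate all entries of the new tbl8 group first. */
    for (i = tbl8_group_start; i < tbl8_group_end; i++)
            lpm->tbl8[i] = new_tbl8_entry;

    /* Publish the group with a release store so that the tbl8 writes
     * above cannot be reordered after the tbl24 update; a reader that
     * sees the new tbl24 entry also sees a fully initialized group.
     */
    __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
                    __ATOMIC_RELEASE);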

Suggested-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
Signed-off-by: Ruifeng Wang <ruifeng.w...@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
Reviewed-by: Gavin Hu <gavin...@arm.com>
---
v3: no changes
v2: fixed clang building issue by supplying alignment attribute.
v1: initial version

  lib/librte_lpm/rte_lpm.c | 31 ++++++++++++++++++++++++-------
  lib/librte_lpm/rte_lpm.h |  4 ++--
  2 files changed, 26 insertions(+), 9 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index fabd13fb0..5f8d494ae 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -737,7 +737,8 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
                        /* Setting tbl24 entry in one go to avoid race
                         * conditions
                         */
-                       lpm->tbl24[i] = new_tbl24_entry;
+                       __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+                                       __ATOMIC_RELEASE);
                        continue;
                }
@@ -892,7 +893,8 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
                        .depth = 0,
                };
-               lpm->tbl24[tbl24_index] = new_tbl24_entry;
+               __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+                               __ATOMIC_RELEASE);
        } /* If valid entry but not extended calculate the index into Table8. */
        else if (lpm->tbl24[tbl24_index].valid_group == 0) {
@@ -938,7 +940,8 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
                                .depth = 0,
                };
-               lpm->tbl24[tbl24_index] = new_tbl24_entry;
+               __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+                               __ATOMIC_RELEASE);
        } else { /*
                * If it is valid, extended entry calculate the index into tbl8.
@@ -1320,7 +1323,14 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
                        if (lpm->tbl24[i].valid_group == 0 &&
                                        lpm->tbl24[i].depth <= depth) {
-                               lpm->tbl24[i].valid = INVALID;
+                               struct rte_lpm_tbl_entry_v20 zero_tbl_entry = {
+                                               .valid = INVALID,
+                                               .depth = 0,
+                                               .valid_group = 0,
+                                       };
+                                       zero_tbl_entry.next_hop = 0;

Please use the same variable name in both v20 and v1604 (zero_tbl24_entry). The same applies to the struct initialization. In v1604 you use:

struct rte_lpm_tbl_entry zero_tbl24_entry = {0};
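
For the v20 path that could look like this (a sketch; INVALID is 0, so
zero-initialization already yields an invalid entry):

    struct rte_lpm_tbl_entry_v20 zero_tbl24_entry = {0};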

+                               __atomic_store(&lpm->tbl24[i], &zero_tbl_entry,
+                                               __ATOMIC_RELEASE);
                        } else if (lpm->tbl24[i].valid_group == 1) {
                                /*
                                 * If TBL24 entry is extended, then there has
@@ -1365,7 +1375,8 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
                        if (lpm->tbl24[i].valid_group == 0 &&
                                        lpm->tbl24[i].depth <= depth) {
-                               lpm->tbl24[i] = new_tbl24_entry;
+                               __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+                                               __ATOMIC_RELEASE);
                        } else  if (lpm->tbl24[i].valid_group == 1) {
                                /*
                                 * If TBL24 entry is extended, then there has
@@ -1647,8 +1658,11 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
        tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8,
                        tbl8_group_start);
        if (tbl8_recycle_index == -EINVAL) {
-               /* Set tbl24 before freeing tbl8 to avoid race condition. */
+               /* Set tbl24 before freeing tbl8 to avoid race condition.
+                * Prevent the free of the tbl8 group from hoisting.
+                */
                lpm->tbl24[tbl24_index].valid = 0;
+               __atomic_thread_fence(__ATOMIC_RELEASE);
                tbl8_free_v20(lpm->tbl8, tbl8_group_start);
        } else if (tbl8_recycle_index > -1) {
                /* Update tbl24 entry. */
@@ -1659,8 +1673,11 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
                        .depth = lpm->tbl8[tbl8_recycle_index].depth,
                };
-               /* Set tbl24 before freeing tbl8 to avoid race condition. */
+               /* Set tbl24 before freeing tbl8 to avoid race condition.
+                * Prevent the free of the tbl8 group from hoisting.
+                */
                lpm->tbl24[tbl24_index] = new_tbl24_entry;
+               __atomic_thread_fence(__ATOMIC_RELEASE);
                tbl8_free_v20(lpm->tbl8, tbl8_group_start);
        }
diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index 6f5704c5c..98c70ecbe 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -88,7 +88,7 @@ struct rte_lpm_tbl_entry_v20 {
         */
        uint8_t valid_group :1;
        uint8_t depth       :6; /**< Rule depth. */
-};
+} __rte_aligned(2);

I think it is better to use __rte_aligned(sizeof(uint16_t)).
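
That is, the closing brace of each entry struct would become:

    } __rte_aligned(sizeof(uint16_t));

which ties the alignment requirement visibly to the 16-bit store width.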

__extension__
  struct rte_lpm_tbl_entry {
@@ -121,7 +121,7 @@ struct rte_lpm_tbl_entry_v20 {
                uint8_t group_idx;
                uint8_t next_hop;
        };
-};
+} __rte_aligned(2);
__extension__
  struct rte_lpm_tbl_entry {

As a general remark, consider writing all of the tbl entries, including tbl8, with __atomic_store(). Right now "lpm->tbl8[j] = new_tbl8_entry;" compiles to something like:

     1e9:   44 88 9c 47 40 01 00 02    mov    %r11b,0x2000140(%rdi,%rax,2)  <- write first byte
     1f1:   48 83 c0 01                add    $0x1,%rax
     1f5:   42 88 8c 47 41 01 00 02    mov    %cl,0x2000141(%rdi,%r8,2)     <- write second byte

This may cause an incorrect next hop to be returned: if the byte with the valid flag is updated first, the old (and possibly invalid) next hop could be returned.
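
A sketch of what I mean, reusing the __atomic_store() pattern from this
patch (the memory order is my assumption; relaxed should be enough for
entries inside a group that is later published by the release store to
tbl24):

    __atomic_store(&lpm->tbl8[j], &new_tbl8_entry, __ATOMIC_RELAXED);

This keeps the 16-bit entry store single-copy atomic, so a reader can
never observe a half-written entry.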

Please evaluate the performance drop after this change.

--
Regards,
Vladimir
