Hi: This is a test on DPDK version 1.2.3. Fixes: 2a2174801fa4 ("lpm: fix bugs in add/delete paths — add strict if-condition checks so tbl24 processing does not fall through into tbl8 processing, and set valid_group = VALID in case valid_group would otherwise be written as invalid")
add strict if control,do not let tbl24 process run into tbl8 process add valid_group = valid, incase ,the valid_group is write to invalid Regards yuerxin --- lib/librte_lpm/rte_lpm.c | 89 +++++++++++++++++++++++++++--------------------- 1 file changed, 50 insertions(+), 39 deletions(-) diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c index bb1ec48..8fabf30 100644 --- a/lib/librte_lpm/rte_lpm.c +++ b/lib/librte_lpm/rte_lpm.c @@ -423,33 +423,35 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, continue; } - -/* If tbl24 entry is valid and extended calculate the index - * into tbl8. */ -tbl8_index = lpm->tbl24[tbl24_index].tbl8_gindex * -RTE_LPM_TBL8_GROUP_NUM_ENTRIES; -tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - -for (j = tbl8_index; j < tbl8_group_end; j++) { -if (!lpm->tbl8[j].valid || -lpm->tbl8[j].depth <= depth) { -struct rte_lpm_tbl8_entry new_tbl8_entry = { -.valid = VALID, -.valid_group = VALID, -.depth = depth, -.next_hop = next_hop, -}; - -/* - * Setting tbl8 entry in one go to avoid race - * conditions - */ -lpm->tbl8[j] = new_tbl8_entry; - -continue; -} -} -} + if (lpm->tbl24[i].ext_entry == 1) { + + /* If tbl24 entry is valid and extended calculate the index + * into tbl8. 
*/ + tbl8_index = lpm->tbl24[i].tbl8_gindex * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + + for (j = tbl8_index; j < tbl8_group_end; j++) { + if (!lpm->tbl8[j].valid || + lpm->tbl8[j].depth <= depth) { + struct rte_lpm_tbl8_entry new_tbl8_entry = { + .valid = VALID, + .valid_group = VALID, + .depth = depth, + .next_hop = next_hop, + }; + + /* + * Setting tbl8 entry in one go to avoid race + * conditions + */ + lpm->tbl8[j] = new_tbl8_entry; + + continue; + } + } + } + } return 0; } @@ -569,6 +571,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, .valid = VALID, .depth = depth, .next_hop = next_hop, + .valid_group = lpm->tbl8[i].valid_group, //save for old groups g_valid flags }; /* @@ -634,7 +637,7 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, } static inline int32_t -find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) +find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth) { int32_t rule_index; uint32_t ip_masked; @@ -645,8 +648,10 @@ find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) rule_index = rule_find(lpm, ip_masked, prev_depth); -if (rule_index >= 0) +if (rule_index >= 0) { + *sub_rule_depth = prev_depth; return rule_index; + } } return -1; @@ -654,7 +659,7 @@ find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) static inline int32_t delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked, -uint8_t depth, int32_t sub_rule_index) +uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) { uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j; uint8_t new_depth; @@ -677,7 +682,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked, lpm->tbl24[i].depth <= depth ) { lpm->tbl24[i].valid = INVALID; } -else { +else if (lpm->tbl24[i].ext_entry == 1) { /* * If TBL24 entry is extended, then there has * to be a rule with depth >= 25 in the @@ -703,19 
+708,21 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked, */ /* Calculate depth of sub_rule. */ + new_depth = (uint8_t) (sub_rule_index / lpm->max_rules_per_depth); struct rte_lpm_tbl24_entry new_tbl24_entry = { .valid = VALID, .ext_entry = 0, -.depth = new_depth, + .depth = sub_rule_depth, {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,} }; struct rte_lpm_tbl8_entry new_tbl8_entry = { .valid = VALID, -.depth = new_depth, + .valid_group = VALID, +.depth = sub_rule_depth, .next_hop = lpm->rules_tbl [sub_rule_index].next_hop, }; @@ -726,7 +733,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked, lpm->tbl24[i].depth <= depth ) { lpm->tbl24[i] = new_tbl24_entry; } -else { +else if (lpm->tbl24[i].ext_entry == 1){ /* * If TBL24 entry is extended, then there has * to be a rule with depth >= 25 in the @@ -807,7 +814,7 @@ tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start) static inline int32_t delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, -uint8_t depth, int32_t sub_rule_index) +uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) { uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index, tbl8_range, i; @@ -843,7 +850,8 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, /* Set new tbl8 entry. */ struct rte_lpm_tbl8_entry new_tbl8_entry = { .valid = VALID, -.depth = new_depth, +.valid_group = lpm->tbl8[tbl8_group_start].valid_group, +.depth = sub_rule_depth, .next_hop = lpm->rules_tbl[sub_rule_index].next_hop, }; @@ -900,6 +908,7 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) { int32_t rule_to_delete_index, sub_rule_index; uint32_t ip_masked; + uint8_t sub_rule_depth; /* * Check input arguments. Note: IP must be a positive integer of 32 * bits in length therefore it need not be checked. 
@@ -931,7 +940,8 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) * replace the rule_to_delete we return -1 and invalidate the table * entries associated with this rule. */ -sub_rule_index = find_previous_rule(lpm, ip, depth); + sub_rule_depth = 0; +sub_rule_index = find_previous_rule(lpm, ip, depth,&sub_rule_depth); /* * If the input depth value is less than 25 use function @@ -939,10 +949,11 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) */ if (depth <= MAX_DEPTH_TBL24) { return delete_depth_small(lpm, ip_masked, depth, -sub_rule_index); +sub_rule_index, sub_rule_depth); } else { /* If depth > MAX_DEPTH_TBL24 */ -return delete_depth_big(lpm, ip_masked, depth, sub_rule_index); +return delete_depth_big(lpm, ip_masked, depth, sub_rule_index, + sub_rule_depth); } } -- 1.8.5.2 (Apple Git-48)