When adding a "depth small" entry, if the covered tbl24 entry's extended flag is not set and the depth being added is smaller than the depth already stored in that entry, nothing should be done; otherwise the code falls through to the tbl8 update and operates on the wrong memory area, because a non-extended entry holds a next hop where the tbl8 group index would be.
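For context, a tbl24 entry packs the next hop and the tbl8 group index into
the same byte (abridged sketch of the little-endian layout in rte_lpm.h):

struct rte_lpm_tbl24_entry {
	/* Stores a next hop OR a group index (gindex) into tbl8. */
	union {
		uint8_t next_hop;
		uint8_t tbl8_gindex;
	};
	uint8_t valid     :1; /* Validation flag. */
	uint8_t ext_entry :1; /* 1 when the entry points to a tbl8 group. */
	uint8_t depth     :6; /* Rule depth. */
};

When ext_entry is 0 that byte holds a next hop, so reading it as
tbl8_gindex and walking lpm->tbl8[] from there overwrites an unrelated
tbl8 group.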
Signed-off-by: Zhe Tao <zhe.tao at intel.com>
---
 lib/librte_lpm/rte_lpm.c | 51 +++++++++++++++++++++++++-----------------------
 1 file changed, 27 insertions(+), 24 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index de05307..0ef2421 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -447,30 +447,33 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 			continue;
 		}
-
-		/* If tbl24 entry is valid and extended calculate the index
-		 * into tbl8. */
-		tbl8_index = lpm->tbl24[i].tbl8_gindex *
-				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-		tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
-		for (j = tbl8_index; j < tbl8_group_end; j++) {
-			if (!lpm->tbl8[j].valid ||
-					lpm->tbl8[j].depth <= depth) {
-				struct rte_lpm_tbl8_entry new_tbl8_entry = {
-					.valid = VALID,
-					.valid_group = VALID,
-					.depth = depth,
-					.next_hop = next_hop,
-				};
-
-				/*
-				 * Setting tbl8 entry in one go to avoid race
-				 * conditions
-				 */
-				lpm->tbl8[j] = new_tbl8_entry;
-
-				continue;
+
+		if (lpm->tbl24[i].ext_entry == 1) {
+
+			/* If tbl24 entry is valid and extended calculate the index
+			 * into tbl8. */
+			tbl8_index = lpm->tbl24[i].tbl8_gindex *
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+			for (j = tbl8_index; j < tbl8_group_end; j++) {
+				if (!lpm->tbl8[j].valid ||
+						lpm->tbl8[j].depth <= depth) {
+					struct rte_lpm_tbl8_entry new_tbl8_entry = {
+						.valid = VALID,
+						.valid_group = VALID,
+						.depth = depth,
+						.next_hop = next_hop,
+					};
+
+					/*
+					 * Setting tbl8 entry in one go to avoid race
+					 * conditions
+					 */
+					lpm->tbl8[j] = new_tbl8_entry;
+
+					continue;
+				}
 			}
 		}
 	}
 
-- 
1.9.3
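For what it's worth, a minimal sequence that reached the bad path before
this fix might look as follows (a hypothetical repro sketch against the
current uint8_t next-hop API; the name, rule count, and next-hop values
are arbitrary):

#include <rte_lpm.h>

static void
repro(void)
{
	struct rte_lpm *lpm = rte_lpm_create("lpm_repro", SOCKET_ID_ANY,
			256, 0);
	uint32_t ip = 10u << 24; /* 10.0.0.0 */

	/* Installs one valid, non-extended tbl24 entry:
	 * depth 24, next hop 1. */
	rte_lpm_add(lpm, ip, 24, 1);

	/*
	 * Depth 16 is smaller than the stored depth 24, so this tbl24
	 * entry must be left untouched. Without the ext_entry check,
	 * the next hop (1) was misread as a tbl8 group index and tbl8
	 * group 1 was overwritten.
	 */
	rte_lpm_add(lpm, ip, 16, 2);

	rte_lpm_free(lpm);
}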