Behavior for signed integer overflow is not defined, which can cause undesired behavior when index values approach the max and min bounds. Unsigned integer overflow is defined as modulo arithmetic, which is the desired behavior here. This patch replaces int32_t with uint32_t for the queue indexes, adding casts back to int32_t only where needed for comparisons.
Fixes: 9ee9e0d3b85e ("net/ark: update to reflect FPGA updates")
Cc: sta...@dpdk.org
Signed-off-by: Ed Czeck <ed.cz...@atomicrules.com>
---
v4:
* fix typo in comment
v3:
* Clarify commit message. int32_t overflow is undefined, while uint32_t is defined as modulo.
* Additional comments in code.
v2:
* update patch to apply to dpdk-next-net
---
 drivers/net/ark/ark_ethdev_tx.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
index 9c89c85f50..16985bac22 100644
--- a/drivers/net/ark/ark_ethdev_tx.c
+++ b/drivers/net/ark/ark_ethdev_tx.c
@@ -39,8 +39,8 @@ struct __rte_cache_aligned ark_tx_queue {
 	uint32_t queue_mask;
 
 	/* 3 indexes to the paired data rings. */
-	int32_t prod_index;		/* where to put the next one */
-	int32_t free_index;		/* mbuf has been freed */
+	uint32_t prod_index;		/* where to put the next one */
+	uint32_t free_index;		/* mbuf has been freed */
 
 	/* The queue Id is used to identify the HW Q */
 	uint16_t phys_qid;
@@ -49,7 +49,7 @@ struct __rte_cache_aligned ark_tx_queue {
 	/* next cache line - fields written by device */
 	alignas(RTE_CACHE_LINE_MIN_SIZE) RTE_MARKER cacheline1;
 
-	volatile int32_t cons_index;		/* hw is done, can be freed */
+	volatile uint32_t cons_index;		/* hw is done, can be freed */
 };
 
 /* Forward declarations */
@@ -108,7 +108,7 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	uint32_t user_meta[5];
 
 	int stat;
-	int32_t prod_index_limit;
+	uint32_t prod_index_limit;
 	uint16_t nb;
 	uint8_t user_len = 0;
 	const uint32_t min_pkt_len = ARK_MIN_TX_PKTLEN;
@@ -123,8 +123,13 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	/* leave 4 elements mpu data */
 	prod_index_limit = queue->queue_size + queue->free_index - 4;
 
+	/* Populate the buffer bringing prod_index up to or slightly beyond
+	 * prod_index_limit. Prod_index will increment by 2 or more each
+	 * iteration. Note: indexes are uint32_t, cast to (signed) int32_t
+	 * to catch the slight overage case; e.g. (200 - 201)
+	 */
 	for (nb = 0;
-	     (nb < nb_pkts) && (prod_index_limit - queue->prod_index) > 0;
+	     (nb < nb_pkts) && (int32_t)(prod_index_limit - queue->prod_index) > 0;
 	     ++nb) {
 		mbuf = tx_pkts[nb];
@@ -194,13 +199,13 @@ eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf,
 		 uint32_t *user_meta, uint8_t meta_cnt)
 {
 	struct rte_mbuf *next;
-	int32_t free_queue_space;
+	uint32_t free_queue_space;
 	uint8_t flags = ARK_DDM_SOP;
 
 	free_queue_space = queue->queue_mask -
 		(queue->prod_index - queue->free_index);
 	/* We need up to 4 mbufs for first header and 2 for subsequent ones */
-	if (unlikely(free_queue_space < (2 + (2 * mbuf->nb_segs))))
+	if (unlikely(free_queue_space < (2U + (2U * mbuf->nb_segs))))
 		return -1;
 
 	while (mbuf != NULL) {
@@ -392,10 +397,11 @@ free_completed_tx(struct ark_tx_queue *queue)
 {
 	struct rte_mbuf *mbuf;
 	union ark_tx_meta *meta;
-	int32_t top_index;
+	uint32_t top_index;
 
 	top_index = queue->cons_index;	/* read once */
-	while ((top_index - queue->free_index) > 0) {
+
+	while ((int32_t)(top_index - queue->free_index) > 0) {
 		meta = &queue->meta_q[queue->free_index & queue->queue_mask];
 		if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
 			mbuf = queue->bufs[queue->free_index &
-- 
2.34.1
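
Not part of the patch itself, but for readers wondering why the cast-to-int32_t comparison is safe: below is a minimal stand-alone sketch (the variable names and values are made up for illustration, not taken from the driver). It shows that the difference between two uint32_t indexes stays small and meaningful across the 2^32 wrap point, and that converting the difference to int32_t catches the "slight overage" case described in the new comment.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative indexes on either side of the 32-bit wrap point.
	 * Unsigned arithmetic is defined to wrap modulo 2^32, so the
	 * distance between them is still 3 even though prod has wrapped.
	 */
	uint32_t free_index = UINT32_MAX - 1;
	uint32_t prod_index = 1;	/* (UINT32_MAX - 1) + 3, wrapped */

	printf("in flight: %" PRIu32 "\n", prod_index - free_index);	/* 3 */

	/* "Slight overage" case from the patch comment: the producer may
	 * step one element past the limit, e.g. limit 200 vs. index 201.
	 * The unsigned difference is a huge value, but converting it to
	 * int32_t yields -1 on the usual two's-complement targets, so a
	 * "> 0" loop condition terminates as intended.
	 */
	uint32_t prod_index_limit = 200;	/* hypothetical values */
	uint32_t prod = 201;

	printf("limit - prod: %" PRId32 "\n",
	       (int32_t)(prod_index_limit - prod));	/* -1 */

	return 0;
}

This is the same wrap-safe comparison idiom commonly used for sequence-number and timestamp comparisons: keep the counters unsigned so the subtraction itself is well defined, and interpret only the difference as signed to decide ordering.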