From: Konstantin Ananyev <konstantin.anan...@huawei.com>

../drivers/net/ice/ice_rxtx.c:1871:29: warning: variable length array used [-Wvla]
Here a VLA is used as a temp array for mbufs that will be used as split RX
data buffers. Since at any given time only one thread can do RX from a
particular queue, we can allocate extra space for that array at
rx_queue_setup() and then safely use it in the RX fast-path.

Signed-off-by: Konstantin Ananyev <konstantin.anan...@huawei.com>
Acked-by: Anatoly Burakov <anatoly.bura...@intel.com>
---
 drivers/net/intel/ice/ice_rxtx.c | 18 ++++++++++++------
 drivers/net/intel/ice/ice_rxtx.h |  2 ++
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
index 8dd8644b16..437cf0d807 100644
--- a/drivers/net/intel/ice/ice_rxtx.c
+++ b/drivers/net/intel/ice/ice_rxtx.c
@@ -1172,7 +1172,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	struct ice_vsi *vsi = pf->main_vsi;
 	struct ice_rx_queue *rxq;
 	const struct rte_memzone *rz;
-	uint32_t ring_size;
+	uint32_t ring_size, tlen;
 	uint16_t len;
 	int use_def_burst_func = 1;
 	uint64_t offloads;
@@ -1280,9 +1280,14 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	/* always reserve more for bulk alloc */
 	len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
 
+	/* allocate extra entries for SW split buffer */
+	tlen = ((rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0) ?
+		rxq->rx_free_thresh : 0;
+	tlen += len;
+
 	/* Allocate the software ring. */
 	rxq->sw_ring = rte_zmalloc_socket(NULL,
-					  sizeof(struct ice_rx_entry) * len,
+					  sizeof(struct ice_rx_entry) * tlen,
 					  RTE_CACHE_LINE_SIZE,
 					  socket_id);
 	if (!rxq->sw_ring) {
@@ -1291,6 +1296,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	rxq->sw_split_buf = (tlen == len) ? NULL : rxq->sw_ring + len;
+
 	ice_reset_rx_queue(rxq);
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -1864,7 +1871,6 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 	uint64_t dma_addr;
 	int diag, diag_pay;
 	uint64_t pay_addr;
-	struct rte_mbuf *mbufs_pay[rxq->rx_free_thresh];
 
 	/* Allocate buffers in bulk */
 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
@@ -1879,7 +1885,7 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 
 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
 		diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp,
-				(void *)mbufs_pay, rxq->rx_free_thresh);
+				(void *)rxq->sw_split_buf, rxq->rx_free_thresh);
 		if (unlikely(diag_pay != 0)) {
 			rte_mempool_put_bulk(rxq->mp, (void *)rxep,
 				rxq->rx_free_thresh);
@@ -1906,8 +1912,8 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 			rxdp[i].read.hdr_addr = 0;
 			rxdp[i].read.pkt_addr = dma_addr;
 		} else {
-			mb->next = mbufs_pay[i];
-			pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs_pay[i]));
+			mb->next = rxq->sw_split_buf[i].mbuf;
+			pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb->next));
 			rxdp[i].read.hdr_addr = dma_addr;
 			rxdp[i].read.pkt_addr = pay_addr;
 		}
diff --git a/drivers/net/intel/ice/ice_rxtx.h b/drivers/net/intel/ice/ice_rxtx.h
index f9293ac6f9..276d40b57f 100644
--- a/drivers/net/intel/ice/ice_rxtx.h
+++ b/drivers/net/intel/ice/ice_rxtx.h
@@ -139,6 +139,8 @@ struct ice_rx_queue {
 	uint32_t hw_time_high; /* high 32 bits of timestamp */
 	uint32_t hw_time_low; /* low 32 bits of timestamp */
 	uint64_t hw_time_update; /* SW time of HW record updating */
+	struct ice_rx_entry *sw_split_buf;
+	/* address of temp buffer for RX split mbufs */
 	struct rte_eth_rxseg_split rxseg[ICE_RX_MAX_NSEG];
 	uint32_t rxseg_nb;
 	bool ts_enable; /* if rxq timestamp is enabled */
-- 
2.47.2.vfs.0.1
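
Note for readers outside the ice driver: below is a minimal standalone sketch
of the same pattern, i.e. reserving scratch space once at queue setup instead
of declaring a VLA in the fast path. It is illustrative only and not part of
the patch; all names (scratch_queue, scratch_queue_setup, scratch_queue_refill)
are hypothetical.

	#include <stdint.h>
	#include <stdlib.h>

	struct scratch_queue {
		void **ring;       /* main SW ring entries */
		void **scratch;    /* extra entries used only by the single RX thread */
		uint16_t ring_len;
		uint16_t burst;    /* max entries needed per refill */
	};

	static int
	scratch_queue_setup(struct scratch_queue *q, uint16_t ring_len, uint16_t burst)
	{
		/* one allocation covers both the ring and the fast-path scratch area */
		q->ring = calloc((size_t)ring_len + burst, sizeof(*q->ring));
		if (q->ring == NULL)
			return -1;
		q->ring_len = ring_len;
		q->burst = burst;
		q->scratch = q->ring + ring_len;
		return 0;
	}

	static void
	scratch_queue_refill(struct scratch_queue *q)
	{
		/* before: void *tmp[q->burst];  -- a VLA, flagged by -Wvla */
		void **tmp = q->scratch; /* safe: only one thread polls this queue */
		uint16_t i;

		for (i = 0; i < q->burst; i++)
			tmp[i] = NULL; /* stand-in for filling with fresh buffers */
	}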