> -----Original Message-----
> From: Junfeng Guo <junfeng....@intel.com>
> Sent: Monday, January 30, 2023 12:27 AM
> To: qi.z.zh...@intel.com; jingjing...@intel.com; ferruh.yi...@amd.com;
> beilei.x...@intel.com
> Cc: dev@dpdk.org; xiaoyun...@intel.com; helin.zh...@intel.com; Junfeng Guo
> <junfeng....@intel.com>; Rushil Gupta <rush...@google.com>; Jordan
> Kimbrough <jr...@google.com>; Jeroen de Borst <jeroe...@google.com>
> Subject: [RFC v2 6/9] net/gve: support basic Rx data path for DQO
> 
> Add basic Rx data path support for DQO.
> 
> Signed-off-by: Junfeng Guo <junfeng....@intel.com>
> Signed-off-by: Rushil Gupta <rush...@google.com>
> Signed-off-by: Jordan Kimbrough <jr...@google.com>
> Signed-off-by: Jeroen de Borst <jeroe...@google.com>
> ---
>  drivers/net/gve/gve_ethdev.c |   1 +
>  drivers/net/gve/gve_ethdev.h |   3 +
>  drivers/net/gve/gve_rx_dqo.c | 128 +++++++++++++++++++++++++++++++++++
>  3 files changed, 132 insertions(+)
> 
> diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
> index 512a038968..89e3f09c37 100644
> --- a/drivers/net/gve/gve_ethdev.c
> +++ b/drivers/net/gve/gve_ethdev.c
> @@ -703,6 +703,7 @@ gve_dev_init(struct rte_eth_dev *eth_dev)
>       } else {
>               /* override Tx/Rx setup/release eth_dev ops */
>               gve_eth_dev_ops_override(&gve_local_eth_dev_ops);
> +             eth_dev->rx_pkt_burst = gve_rx_burst_dqo;
>               eth_dev->tx_pkt_burst = gve_tx_burst_dqo;
>       }
> 
> diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
> index ba657dd6c1..d434f9babe 100644
> --- a/drivers/net/gve/gve_ethdev.h
> +++ b/drivers/net/gve/gve_ethdev.h
> @@ -366,6 +366,9 @@ gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);
>  void
>  gve_stop_rx_queues_dqo(struct rte_eth_dev *dev);
> 
> +uint16_t
> +gve_rx_burst_dqo(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
> +
>  uint16_t
>  gve_tx_burst_dqo(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
> 
> diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
> index aca6f8ea2d..244517ce5d 100644
> --- a/drivers/net/gve/gve_rx_dqo.c
> +++ b/drivers/net/gve/gve_rx_dqo.c
> @@ -5,6 +5,134 @@
>  #include "gve_ethdev.h"
>  #include "base/gve_adminq.h"
> 
> +static inline void
> +gve_rx_refill_dqo(struct gve_rx_queue *rxq)
> +{
> +     volatile struct gve_rx_desc_dqo *rx_buf_ring;
> +     volatile struct gve_rx_desc_dqo *rx_buf_desc;
> +     struct rte_mbuf *nmb[rxq->free_thresh];
> +     uint16_t nb_refill = rxq->free_thresh;
> +     uint16_t nb_desc = rxq->nb_rx_desc;
> +     uint16_t next_avail = rxq->bufq_tail;
> +     struct rte_eth_dev *dev;
> +     uint64_t dma_addr;
> +     uint16_t delta;
> +     int i;
> +
> +     if (rxq->nb_rx_hold < rxq->free_thresh)
> +             return;
> +
> +     rx_buf_ring = rxq->rx_ring;
> +     delta = nb_desc - next_avail;
> +     if (unlikely(delta < nb_refill)) {
> +             if (likely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, delta) == 0)) {
> +                     for (i = 0; i < delta; i++) {
> +                             rx_buf_desc = &rx_buf_ring[next_avail + i];
> +                             rxq->sw_ring[next_avail + i] = nmb[i];
> +                             dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
> +                             rx_buf_desc->header_buf_addr = 0;
> +                             rx_buf_desc->buf_addr = dma_addr;
> +                     }
> +                     nb_refill -= delta;
> +                     next_avail = 0;
> +                     rxq->nb_rx_hold -= delta;
> +             } else {
> +                     dev = &rte_eth_devices[rxq->port_id];
> +                     dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
> +                     PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
> +                                 rxq->port_id, rxq->queue_id);
> +                     return;
> +             }
> +     }
> +
> +     if (nb_desc - next_avail >= nb_refill) {
> +             if (likely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, nb_refill) == 0)) {
> +                     for (i = 0; i < nb_refill; i++) {
> +                             rx_buf_desc = &rx_buf_ring[next_avail + i];
> +                             rxq->sw_ring[next_avail + i] = nmb[i];
> +                             dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
> +                             rx_buf_desc->header_buf_addr = 0;
> +                             rx_buf_desc->buf_addr = dma_addr;
> +                     }
> +                     next_avail += nb_refill;
> +                     rxq->nb_rx_hold -= nb_refill;
> +             } else {
> +                     dev = &rte_eth_devices[rxq->port_id];
> +                     dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
> +                     PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
> +                                 rxq->port_id, rxq->queue_id);
> +             }
> +     }
> +
> +     rte_write32(next_avail, rxq->qrx_tail);
> +
> +     rxq->bufq_tail = next_avail;
> +}
> +
> +uint16_t
> +gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
> +{
> +     volatile struct gve_rx_compl_desc_dqo *rx_compl_ring;
> +     volatile struct gve_rx_compl_desc_dqo *rx_desc;
> +     struct gve_rx_queue *rxq;
> +     struct rte_mbuf *rxm;
> +     uint16_t rx_id_bufq;
> +     uint16_t pkt_len;
> +     uint16_t rx_id;
> +     uint16_t nb_rx;
> +
> +     nb_rx = 0;
> +     rxq = rx_queue;
> +     rx_id = rxq->rx_tail;
> +     rx_id_bufq = rxq->next_avail;
> +     rx_compl_ring = rxq->compl_ring;
> +
> +     while (nb_rx < nb_pkts) {
> +             rx_desc = &rx_compl_ring[rx_id];
> +
> +             /* check status */
> +             if (rx_desc->generation != rxq->cur_gen_bit)
> +                     break;
From my experience with other PMDs, I think an IO read barrier is needed
here to ensure that the other fields of the descriptor are not loaded
before rx_desc->generation.
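
Something along these lines (just a sketch, not tested; assuming the
generic rte_io_rmb() is a sufficient barrier for this device):

        /* check status */
        if (rx_desc->generation != rxq->cur_gen_bit)
                break;

        /* Do not let loads of the rest of the completion descriptor be
         * reordered before the load of the generation bit above.
         */
        rte_io_rmb();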

> +
> +             if (unlikely(rx_desc->rx_error))
> +                     continue;
> +
> +             pkt_len = rx_desc->packet_len;
> +
> +             rx_id++;
> +             if (rx_id == rxq->nb_rx_desc) {
> +                     rx_id = 0;
> +                     rxq->cur_gen_bit ^= 1;
> +             }
> +
> +             rxm = rxq->sw_ring[rx_id_bufq];
> +             rx_id_bufq++;
> +             if (rx_id_bufq == rxq->nb_rx_desc)
> +                     rx_id_bufq = 0;
> +             rxq->nb_rx_hold++;
> +
> +             rxm->pkt_len = pkt_len;
> +             rxm->data_len = pkt_len;
> +             rxm->port = rxq->port_id;
> +             rxm->ol_flags = 0;
> +
> +             rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
> +             rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash);
> +
> +             rx_pkts[nb_rx++] = rxm;
> +     }
> +
> +     if (nb_rx > 0) {
> +             rxq->rx_tail = rx_id;
> +             if (rx_id_bufq != rxq->next_avail)
> +                     rxq->next_avail = rx_id_bufq;
> +
> +             gve_rx_refill_dqo(rxq);
> +     }
> +
> +     return nb_rx;
> +}
> +
>  static inline void
>  gve_release_rxq_mbufs_dqo(struct gve_rx_queue *rxq)
>  {
> --
> 2.34.1
