Enable packet receive and transmit functions.

For receive, the back-end drains descriptors that the front-end iavf
driver queued on its Tx ring and copies packet data out of guest
memory into mbufs; Tx offload requests found in the descriptors
(checksum, TSO, VLAN insertion) are translated into mbuf metadata.
For transmit, packets are copied into the buffers the front-end
posted on its Rx ring, and the descriptors are written back with
DD and EOP status set.

Signed-off-by: Jingjing Wu <jingjing...@intel.com>
Signed-off-by: Xiuchun Lu <xiuchun...@intel.com>
Signed-off-by: Miao Li <miao...@intel.com>
---
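Not part of the patch itself: a minimal smoke-test sketch of the new
burst paths through the generic ethdev API. It assumes the back-end
port is already configured and started; the port and queue ids below
are examples only.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST 32

    /* Echo traffic back to the front-end. rte_eth_rx_burst() lands in
     * iavfbe_recv_pkts(), rte_eth_tx_prepare() in iavfbe_prep_pkts()
     * and rte_eth_tx_burst() in iavfbe_xmit_pkts().
     */
    static void
    echo_once(uint16_t be_port)
    {
            struct rte_mbuf *pkts[BURST];
            uint16_t nb_rx, nb_ok, nb_tx, i;

            nb_rx = rte_eth_rx_burst(be_port, 0, pkts, BURST);
            if (nb_rx == 0)
                    return;

            nb_ok = rte_eth_tx_prepare(be_port, 0, pkts, nb_rx);
            nb_tx = rte_eth_tx_burst(be_port, 0, pkts, nb_ok);

            /* Free whatever the queue did not accept */
            for (i = nb_tx; i < nb_rx; i++)
                    rte_pktmbuf_free(pkts[i]);
    }
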
 drivers/net/iavf_be/iavf_be_ethdev.c |   3 +
 drivers/net/iavf_be/iavf_be_rxtx.c   | 342 +++++++++++++++++++++++++++
 drivers/net/iavf_be/iavf_be_rxtx.h   |  60 +++++
 3 files changed, 405 insertions(+)

diff --git a/drivers/net/iavf_be/iavf_be_ethdev.c b/drivers/net/iavf_be/iavf_be_ethdev.c
index 940ed66ce4..4bf936f21b 100644
--- a/drivers/net/iavf_be/iavf_be_ethdev.c
+++ b/drivers/net/iavf_be/iavf_be_ethdev.c
@@ -864,6 +864,9 @@ eth_dev_iavfbe_create(struct rte_vdev_device *dev,
        rte_ether_addr_copy(addr, &eth_dev->data->mac_addrs[0]);
 
        eth_dev->dev_ops = &iavfbe_eth_dev_ops;
+       eth_dev->rx_pkt_burst = &iavfbe_recv_pkts;
+       eth_dev->tx_pkt_burst = &iavfbe_xmit_pkts;
+       eth_dev->tx_pkt_prepare = &iavfbe_prep_pkts;
 
        eth_dev->data->dev_link = iavfbe_link;
        eth_dev->data->numa_node = dev->device.numa_node;
diff --git a/drivers/net/iavf_be/iavf_be_rxtx.c b/drivers/net/iavf_be/iavf_be_rxtx.c
index dd275b80c6..66f30cc0a8 100644
--- a/drivers/net/iavf_be/iavf_be_rxtx.c
+++ b/drivers/net/iavf_be/iavf_be_rxtx.c
@@ -162,3 +162,345 @@ iavfbe_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        qinfo->conf.offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
        qinfo->conf.tx_deferred_start = false;
 }
+
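+/* Translate the offload requests found in a front-end Tx data
+ * descriptor (cmd and offset fields) into mbuf header lengths
+ * and ol_flags.
+ */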
+static inline void
+iavfbe_recv_offload(struct rte_mbuf *m,
+       uint16_t cmd, uint32_t offset)
+{
+       m->l2_len = ((offset & IAVF_TXD_QW1_MACLEN_MASK) >>
+               IAVF_TX_DESC_LENGTH_MACLEN_SHIFT) << 1;
+       m->l3_len = ((offset & IAVF_TXD_QW1_IPLEN_MASK) >>
+               IAVF_TX_DESC_LENGTH_IPLEN_SHIFT) << 2;
+       m->l4_len = ((offset & IAVF_TXD_QW1_L4LEN_MASK) >>
+               IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) << 2;
+
+       switch (cmd & IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM) {
+       case IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM:
+               m->ol_flags = PKT_TX_IP_CKSUM | PKT_TX_IPV4;
+               break;
+       case IAVF_TX_DESC_CMD_IIPT_IPV4:
+               m->ol_flags = PKT_TX_IPV4;
+               break;
+       case IAVF_TX_DESC_CMD_IIPT_IPV6:
+               m->ol_flags = PKT_TX_IPV6;
+               break;
+       default:
+               break;
+       }
+
+       switch (cmd & IAVF_TX_DESC_CMD_L4T_EOFT_UDP) {
+       case IAVF_TX_DESC_CMD_L4T_EOFT_UDP:
+               m->ol_flags |= PKT_TX_UDP_CKSUM;
+               break;
+       case IAVF_TX_DESC_CMD_L4T_EOFT_SCTP:
+               m->ol_flags |= PKT_TX_SCTP_CKSUM;
+               break;
+       case IAVF_TX_DESC_CMD_L4T_EOFT_TCP:
+               m->ol_flags |= PKT_TX_TCP_CKSUM;
+               break;
+       default:
+               break;
+       }
+}
+
+/* RX function: the back-end "receives" by draining the descriptors
+ * that the front-end iavf driver placed on its Tx ring and copying
+ * the packet data out of guest memory into local mbufs.
+ */
+uint16_t
+iavfbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       struct iavfbe_rx_queue *rxq = (struct iavfbe_rx_queue *)rx_queue;
+       struct iavfbe_adapter *adapter = (struct iavfbe_adapter *)rxq->adapter;
+       uint32_t nb_rx = 0;
+       uint16_t head, tail;
+       uint16_t cmd;
+       uint32_t offset;
+       volatile struct iavf_tx_desc *ring_dma;
+       struct rte_ether_addr *ea = NULL;
+       uint64_t ol_flags, tso_segsz = 0;
+
+       rte_spinlock_lock(&rxq->access_lock);
+
+       if (unlikely(rte_atomic32_read(&rxq->enable) == 0)) {
+               /* RX queue is not enabled currently */
+               goto end_unlock;
+       }
+
+       ring_dma = rxq->tx_ring;
+       head = rxq->tx_head;
+       tail = (uint16_t)IAVFBE_READ_32(rxq->qtx_tail);
+
+       while (head != tail && nb_rx < nb_pkts) {
+               volatile struct iavf_tx_desc *d;
+               void *desc_addr;
+               uint64_t data_len, tmp;
+               struct rte_mbuf *cur, *rxm, *first = NULL;
+
+               ol_flags = 0;
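+               /* Gather one packet: consume descriptors until EOP is
+                * seen, chaining one mbuf per data descriptor.
+                */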
+               while (1) {
+                       d = &ring_dma[head];
+                       head++;
+
+                       if (unlikely(head == rxq->nb_rx_desc))
+                               head = 0;
+
+                       if ((head & 0x3) == 0)
+                               rte_prefetch0(&ring_dma[head]);
+
+                       IAVF_BE_DUMP_TX_DESC(rxq, d, head);
+
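+                       /* TSO is signalled by a leading context descriptor
+                        * carrying the MSS; consume it and continue with the
+                        * data descriptor that follows.
+                        */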
+                       if ((d->cmd_type_offset_bsz &
+                            IAVF_TXD_QW1_DTYPE_MASK) ==
+                           IAVF_TX_DESC_DTYPE_CONTEXT) {
+                               ol_flags = PKT_TX_TCP_SEG;
+                               tso_segsz = (d->cmd_type_offset_bsz &
+                                            IAVF_TXD_CTX_QW1_MSS_MASK) >>
+                                           IAVF_TXD_CTX_QW1_MSS_SHIFT;
+                               d = &ring_dma[head];
+                               head++;
+                               if (unlikely(head == rxq->nb_rx_desc))
+                                       head = 0;
+                       }
+
+                       cmd = (d->cmd_type_offset_bsz & IAVF_TXD_QW1_CMD_MASK) >>
+                               IAVF_TXD_QW1_CMD_SHIFT;
+                       offset = (d->cmd_type_offset_bsz &
+                                 IAVF_TXD_QW1_OFFSET_MASK) >>
+                               IAVF_TXD_QW1_OFFSET_SHIFT;
+
+                       rxm = rte_pktmbuf_alloc(rxq->mp);
+                       if (unlikely(rxm == NULL)) {
+                               IAVF_BE_LOG(ERR, "Failed to allocate mbuf\n");
+                               rte_pktmbuf_free(first);
+                               goto end_of_recv;
+                       }
+
+                       data_len = (rte_le_to_cpu_64(d->cmd_type_offset_bsz)
+                                               & IAVF_TXD_QW1_TX_BUF_SZ_MASK)
+                               >> IAVF_TXD_QW1_TX_BUF_SZ_SHIFT;
+                       if (data_len > rte_pktmbuf_tailroom(rxm)) {
+                               rte_pktmbuf_free(rxm);
+                               rte_pktmbuf_free(first);
+                               goto end_of_recv;
+                       }
+                       tmp = data_len;
+                       desc_addr = (void *)(uintptr_t)rte_iavf_emu_get_dma_vaddr(
+                               adapter->mem_table, d->buffer_addr, &tmp);
+
+                       rte_prefetch0(desc_addr);
+                       rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr,
+                                     RTE_PKTMBUF_HEADROOM));
+
+                       rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+                       rte_memcpy(rte_pktmbuf_mtod(rxm, void *), desc_addr,
+                                  data_len);
+
+                       rxm->nb_segs = 1;
+                       rxm->next = NULL;
+                       rxm->pkt_len = data_len;
+                       rxm->data_len = data_len;
+
+                       if (cmd & IAVF_TX_DESC_CMD_IL2TAG1)
+                               rxm->vlan_tci = (d->cmd_type_offset_bsz &
+                                                IAVF_TXD_QW1_L2TAG1_MASK) >>
+                                               IAVF_TXD_QW1_L2TAG1_SHIFT;
+
+                       if (cmd & IAVF_TX_DESC_CMD_RS)
+                               d->cmd_type_offset_bsz =
+                                       rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
+
+                       if (!first) {
+                               first = rxm;
+                               cur = rxm;
+                               iavfbe_recv_offload(rxm, cmd, offset);
+                               /* TSO enabled */
+                               if (ol_flags & PKT_TX_TCP_SEG) {
+                                       rxm->tso_segsz = tso_segsz;
+                                       rxm->ol_flags |= ol_flags;
+                               }
+                       } else {
+                               first->pkt_len += (uint32_t)data_len;
+                               first->nb_segs++;
+                               cur->next = rxm;
+                               cur = rxm;
+                       }
+
+                       if (cmd & IAVF_TX_DESC_CMD_EOP)
+                               break;
+               }
+
+               if ((!(ol_flags & PKT_TX_TCP_SEG)) &&
+                   (first->pkt_len > rxq->max_pkt_len)) {
+                       rte_pktmbuf_free(first);
+                       goto end_of_recv;
+               }
+
+               rx_pkts[nb_rx] = first;
+               nb_rx++;
+
+               /* Count multicast and broadcast */
+               ea = rte_pktmbuf_mtod(first, struct rte_ether_addr *);
+               if (rte_is_multicast_ether_addr(ea)) {
+                       if (rte_is_broadcast_ether_addr(ea))
+                               rxq->stats.recv_broad_num++;
+                       else
+                               rxq->stats.recv_multi_num++;
+               }
+
+               rxq->stats.recv_pkt_num++;
+               rxq->stats.recv_bytes += first->pkt_len;
+       }
+
+end_of_recv:
+       rxq->tx_head = head;
+end_unlock:
+       rte_spinlock_unlock(&rxq->access_lock);
+
+       return nb_rx;
+}
+
+/* TX function: the back-end "transmits" by copying packets into the
+ * buffers that the front-end posted on its Rx ring and writing the
+ * descriptors back with completion status.
+ */
+uint16_t
+iavfbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       struct iavfbe_tx_queue *txq = (struct iavfbe_tx_queue *)tx_queue;
+       struct iavfbe_adapter *adapter = (struct iavfbe_adapter *)txq->adapter;
+       volatile union iavf_rx_desc *ring_dma;
+       volatile union iavf_rx_desc *d;
+       struct rte_ether_addr *ea = NULL;
+       struct rte_mbuf *pkt, *m;
+       uint16_t head, tail;
+       uint16_t nb_tx = 0;
+       uint16_t nb_avail; /* number of avail desc */
+       void *desc_addr;
+       uint64_t len, data_len;
+       uint32_t pkt_len;
+       uint64_t qword1;
+
+       rte_spinlock_lock(&txq->access_lock);
+
+       if (unlikely(rte_atomic32_read(&txq->enable) == 0)) {
+               /* TX queue is not enabled currently */
+               goto end_unlock;
+       }
+
+       head = txq->rx_head;
+       ring_dma = txq->rx_ring;
+       tail = (uint16_t)IAVFBE_READ_32(txq->qrx_tail);
+       nb_avail = (tail >= head) ?
+               (tail - head) : (txq->nb_tx_desc - head + tail);
+
+       while (nb_avail > 0 && nb_tx < nb_pkts) {
+               pkt = tx_pkts[nb_tx];
+               pkt_len = rte_pktmbuf_pkt_len(pkt);
+
+               if (pkt->nb_segs > nb_avail) /* no desc to use */
+                       goto end_of_xmit;
+
+               m = pkt;
+
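+               /* Copy each mbuf segment into the guest buffer posted
+                * in the next Rx descriptor, then write the descriptor
+                * back with DD set (and EOP on the last segment).
+                */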
+               do {
+                       qword1 = 0;
+                       d = &ring_dma[head];
+                       data_len = rte_pktmbuf_data_len(m);
+                       len = data_len;
+                       desc_addr = (void *)(uintptr_t)rte_iavf_emu_get_dma_vaddr(
+                               adapter->mem_table,
+                               rte_le_to_cpu_64(d->read.pkt_addr),
+                               &len);
+
+                       rte_memcpy(desc_addr, rte_pktmbuf_mtod(m, void *),
+                                  data_len);
+
+                       /* If pkt carries vlan info, post it to descriptor */
+                       if (m->ol_flags & (PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN)) {
+                               qword1 |= (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)
+                                       << IAVF_RXD_QW1_STATUS_SHIFT;
+                               d->wb.qword0.lo_dword.l2tag1 =
+                                       rte_cpu_to_le_16(pkt->vlan_tci);
+                       }
+                       m = m->next;
+                       /* Mark the last desc with EOP flag */
+                       if (!m)
+                               qword1 |=
+                                       ((1 << IAVF_RX_DESC_STATUS_EOF_SHIFT)
+                                        << IAVF_RXD_QW1_STATUS_SHIFT);
+
+                       qword1 = qword1 |
+                               ((1 << IAVF_RX_DESC_STATUS_DD_SHIFT)
+                               << IAVF_RXD_QW1_STATUS_SHIFT) |
+                               ((data_len << IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
+                               & IAVF_RXD_QW1_LENGTH_PBUF_MASK);
+
+                       rte_wmb();
+
+                       d->wb.qword1.status_error_len = rte_cpu_to_le_64(qword1);
+
+                       IAVF_BE_DUMP_RX_DESC(txq, d, head);
+
+                       head++;
+                       if (head >= txq->nb_tx_desc)
+                               head = 0;
+
+                       /* Prefetch next 4 RX descriptors */
+                       if ((head & 0x3) == 0)
+                               rte_prefetch0(&ring_dma[head]);
+               } while (m);
+
+               nb_avail -= pkt->nb_segs;
+
+               nb_tx++;
+
+               /* update stats */
+               ea = rte_pktmbuf_mtod(pkt, struct rte_ether_addr *);
+               if (rte_is_multicast_ether_addr(ea)) {
+                       if (rte_is_broadcast_ether_addr(ea))
+                               txq->stats.sent_broad_num++;
+                       else
+                               txq->stats.sent_multi_num++;
+               }
+               txq->stats.sent_pkt_num++;
+               txq->stats.sent_bytes += pkt_len;
+               /* Free entire packet */
+               rte_pktmbuf_free(pkt);
+       }
+
+end_of_xmit:
+       txq->rx_head = head;
+       txq->stats.sent_miss_num += nb_pkts - nb_tx;
+end_unlock:
+       rte_spinlock_unlock(&txq->access_lock);
+
+       return nb_tx;
+}
+
+/* TX prep function: validate packet length, segment count and
+ * per-segment length against the queue limits.
+ */
+uint16_t
+iavfbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+                uint16_t nb_pkts)
+{
+       struct iavfbe_tx_queue *txq = (struct iavfbe_tx_queue *)tx_queue;
+       struct rte_mbuf *m;
+       uint16_t data_len;
+       uint32_t pkt_len;
+       int i;
+
+       for (i = 0; i < nb_pkts; i++) {
+               m = tx_pkts[i];
+               pkt_len = rte_pktmbuf_pkt_len(m);
+
+               /* Check buffer len and packet len */
+               if (pkt_len > txq->max_pkt_size) {
+                       rte_errno = EINVAL;
+                       return i;
+               }
+               /* Cannot support a pkt using more than 5 descriptors */
+               if (m->nb_segs > AVF_RX_MAX_SEG) {
+                       rte_errno = EINVAL;
+                       return i;
+               }
+               do {
+                       data_len = rte_pktmbuf_data_len(m);
+                       if (data_len > txq->buffer_size) {
+                               rte_errno = EINVAL;
+                               return i;
+                       }
+                       m = m->next;
+               } while (m);
+       }
+
+       return i;
+}
diff --git a/drivers/net/iavf_be/iavf_be_rxtx.h b/drivers/net/iavf_be/iavf_be_rxtx.h
index cc72769337..71495a21bd 100644
--- a/drivers/net/iavf_be/iavf_be_rxtx.h
+++ b/drivers/net/iavf_be/iavf_be_rxtx.h
@@ -101,5 +101,65 @@ void iavfbe_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                             struct rte_eth_rxq_info *qinfo);
 void iavfbe_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                             struct rte_eth_txq_info *qinfo);
+uint16_t iavfbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+                         uint16_t nb_pkts);
+uint16_t iavfbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+                         uint16_t nb_pkts);
+uint16_t iavfbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+                         uint16_t nb_pkts);
+
+static inline
+void iavfbe_dump_rx_descriptor(struct iavfbe_tx_queue *txq,
+                           const void *desc,
+                           uint16_t rx_id)
+{
+       const union iavf_32byte_rx_desc *rx_desc = desc;
+
+       printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
+              " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", txq->queue_id,
+              rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
+              rx_desc->read.rsvd1, rx_desc->read.rsvd2);
+}
+
+/* All the descriptors are 16 bytes, so just use one of them
+ * to print the qwords
+ */
+static inline
+void iavfbe_dump_tx_descriptor(const struct iavfbe_rx_queue *rxq,
+                           const void *desc, uint16_t tx_id)
+{
+       const char *name;
+       const struct iavf_tx_desc *tx_desc = desc;
+       enum iavf_tx_desc_dtype_value type;
+
+       type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
+               tx_desc->cmd_type_offset_bsz &
+               rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+       switch (type) {
+       case IAVF_TX_DESC_DTYPE_DATA:
+               name = "Tx_data_desc";
+               break;
+       case IAVF_TX_DESC_DTYPE_CONTEXT:
+               name = "Tx_context_desc";
+               break;
+       default:
+               name = "unknown_desc";
+               break;
+       }
+
+       printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
+              rxq->queue_id, name, tx_id, tx_desc->buffer_addr,
+              tx_desc->cmd_type_offset_bsz);
+}
+
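+/* Build with DEBUG_DUMP_DESC defined to enable per-descriptor dumps. */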
+#ifdef DEBUG_DUMP_DESC
+#define IAVF_BE_DUMP_RX_DESC(rxq, desc, rx_id) \
+       iavfbe_dump_rx_descriptor(rxq, desc, rx_id)
+#define IAVF_BE_DUMP_TX_DESC(txq, desc, tx_id) \
+       iavfbe_dump_tx_descriptor(txq, desc, tx_id)
+#else
+#define IAVF_BE_DUMP_RX_DESC(rxq, desc, rx_id) do { } while (0)
+#define IAVF_BE_DUMP_TX_DESC(txq, desc, tx_id) do { } while (0)
+#endif
 
 #endif /* _AVF_BE_RXTX_H_ */
-- 
2.21.1
