From: Beilei Xing <beilei.x...@intel.com>

Add the packet dispatch function cpfl_packets_dispatch(). It receives
packets from the exceptional vport's rx queues, looks up the representor
matching each packet's source VSI, and enqueues the packets to that
representor's rx ring.

Signed-off-by: Beilei Xing <beilei.x...@intel.com>
---
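How cpfl_packets_dispatch() gets scheduled is outside the scope of this
patch. A minimal sketch of one possible wiring, assuming the exceptional
vport's ethdev is polled periodically from the EAL alarm thread (the wrapper
name, interval and re-arming policy below are illustrative assumptions, not
part of this patch):

#include <rte_alarm.h>
#include <rte_ethdev.h>

#include "cpfl_ethdev.h"

/* Illustrative polling period, not taken from this patch. */
#define CPFL_DISPATCH_INTERVAL_US 1000

/* Hypothetical wrapper: rte_eal_alarm_set() expects a void-returning
 * callback, while cpfl_packets_dispatch() returns int.
 */
static void
cpfl_dispatch_alarm_cb(void *arg)
{
	struct rte_eth_dev *excep_dev = arg;	/* exceptional vport's ethdev */

	/* Pull packets from the exceptional vport's rx queues and push them
	 * to the matching representors' rx rings.
	 */
	cpfl_packets_dispatch(excep_dev);

	/* Re-arm so dispatching keeps running until the alarm is cancelled. */
	rte_eal_alarm_set(CPFL_DISPATCH_INTERVAL_US, cpfl_dispatch_alarm_cb, excep_dev);
}
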
 drivers/net/cpfl/cpfl_ethdev.c      |  39 ++++++++-
 drivers/net/cpfl/cpfl_ethdev.h      |   1 +
 drivers/net/cpfl/cpfl_representor.c |  80 +++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.c        | 131 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h        |   8 ++
 5 files changed, 257 insertions(+), 2 deletions(-)
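
For reference, other components can resolve the "cpfl_source_metadata"
dynfield by name instead of using the driver-internal offset variable. A
minimal sketch, assuming the dynfield has already been registered by the
probe path (the helper name is illustrative):

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

/* Illustrative helper: read the source VSI that cpfl_splitq_recv_pkts()
 * stores in the "cpfl_source_metadata" dynfield of each received mbuf.
 */
static inline uint16_t
cpfl_example_get_source_vsi(const struct rte_mbuf *m)
{
	static int offset = -1;

	/* Resolve the offset registered in cpfl_pci_probe_first()/again(). */
	if (offset < 0)
		offset = rte_mbuf_dynfield_lookup("cpfl_source_metadata", NULL);
	if (offset < 0)
		return UINT16_MAX;	/* dynfield not registered */

	return *RTE_MBUF_DYNFIELD(m, offset, uint16_t *);
}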

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index f674d93050..8569a0b81d 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -129,6 +129,13 @@ static const struct rte_cpfl_xstats_name_off rte_cpfl_stats_strings[] = {
 
 #define CPFL_NB_XSTATS                 RTE_DIM(rte_cpfl_stats_strings)
 
+static const struct rte_mbuf_dynfield cpfl_source_metadata_param = {
+       .name = "cpfl_source_metadata",
+       .size = sizeof(uint16_t),
+       .align = __alignof__(uint16_t),
+       .flags = 0,
+};
+
 static int
 cpfl_dev_link_update(struct rte_eth_dev *dev,
                     __rte_unused int wait_to_complete)
@@ -2382,7 +2389,7 @@ static int
 cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
        struct cpfl_adapter_ext *adapter;
-       int retval;
+       int retval, offset;
        uint16_t port_id;
 
        adapter = rte_zmalloc("cpfl_adapter_ext",
@@ -2432,7 +2439,22 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
                        PMD_INIT_LOG(ERR, "Failed to create exceptional vport. 
");
                        goto close_ethdev;
                }
+
+               /* register dynfield to carry src_vsi
+                * TODO: is it wasteful to use a dynfield? Could we instead define a recv func
+                * like the one below, carrying the source VSI directly via src_vsi[]?
+                * idpf_exceptional_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+                *                            uint16_t src_vsi[], uint16_t nb_pkts)
+                */
+               offset = rte_mbuf_dynfield_register(&cpfl_source_metadata_param);
+               if (unlikely(offset == -1)) {
+                       retval = -rte_errno;
+                       PMD_INIT_LOG(ERR, "Failed to register source metadata dynfield in mbuf");
+                       goto close_ethdev;
+               }
+               cpfl_dynfield_source_metadata_offset = offset;
        }
+
        retval = cpfl_repr_create(pci_dev, adapter);
        if (retval != 0) {
                PMD_INIT_LOG(ERR, "Failed to create representors ");
@@ -2458,7 +2480,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 static int
 cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
-       int ret;
+       int ret, offset;
 
        ret = cpfl_parse_devargs(pci_dev, adapter, false);
        if (ret != 0) {
@@ -2478,6 +2500,19 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
                        PMD_INIT_LOG(ERR, "Failed to create exceptional vport. ");
                        return ret;
                }
+
+               /* register dynfield to carry src_vsi
+                * TODO: is it wasteful to use a dynfield? Could we instead define a recv func
+                * like the one below, carrying the source VSI directly via src_vsi[]?
+                * idpf_exceptional_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+                *                            uint16_t src_vsi[], uint16_t nb_pkts)
+                */
+               offset = rte_mbuf_dynfield_register(&cpfl_source_metadata_param);
+               if (unlikely(offset == -1)) {
+                       PMD_INIT_LOG(ERR, "Failed to register source metadata dynfield in mbuf");
+                       return -rte_errno;
+               }
+               cpfl_dynfield_source_metadata_offset = offset;
        }
 
        ret = cpfl_repr_create(pci_dev, adapter);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 33e810408b..5bd6f930b8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -227,6 +227,7 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
                           struct cpchnl2_vport_id *vport_id,
                           struct cpfl_vport_id *vi,
                           struct cpchnl2_get_vport_info_response *response);
+int cpfl_packets_dispatch(void *arg);
 
 #define CPFL_DEV_TO_PCI(eth_dev)               \
        RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 51b70ea346..a781cff403 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,7 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_ethdev.h"
 
 static int
 cpfl_repr_whitelist_update(struct cpfl_adapter_ext *adapter,
@@ -853,3 +854,82 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte
 
        return 0;
 }
+
+static struct cpfl_repr *
+cpfl_get_repr_by_vsi(struct cpfl_adapter_ext *adapter,
+                    uint16_t vsi_id)
+{
+       const struct cpfl_repr_id *repr_id;
+       struct rte_eth_dev *dev;
+       struct cpfl_repr *repr;
+       uint32_t iter = 0;
+
+       rte_spinlock_lock(&adapter->repr_lock);
+
+       while (rte_hash_iterate(adapter->repr_whitelist_hash,
+                               (const void **)&repr_id, (void **)&dev, &iter) >= 0) {
+               if (dev == NULL)
+                       continue;
+
+               repr = CPFL_DEV_TO_REPR(dev);
+               if (repr->vport_info->vport_info.vsi_id == vsi_id) {
+                       rte_spinlock_unlock(&adapter->repr_lock);
+                       return repr;
+               }
+       }
+
+       rte_spinlock_unlock(&adapter->repr_lock);
+       return NULL;
+}
+
+#define PKT_DISPATCH_BURST  32
+/* Function to dispatch packets to the representors' rx rings */
+int
+cpfl_packets_dispatch(void *arg)
+{
+       struct rte_eth_dev *dev = arg;
+       struct cpfl_vport *vport = dev->data->dev_private;
+       struct cpfl_adapter_ext *adapter = vport->itf.adapter;
+       struct cpfl_rx_queue **rxq =
+               (struct cpfl_rx_queue **)dev->data->rx_queues;
+       struct rte_mbuf *pkts_burst[PKT_DISPATCH_BURST];
+       struct cpfl_repr *repr;
+       struct rte_eth_dev_data *dev_data;
+       struct cpfl_repr_rx_queue *repr_rxq;
+       uint16_t src_vsi;
+       uint32_t nb_rx, nb_enq;
+       uint16_t i, j;
+
+       if (dev->data->dev_started == 0) {
+               /* skip if exceptional vport is not started */
+               return 0;
+       }
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               nb_rx = cpfl_splitq_recv_pkts(rxq[i], pkts_burst, PKT_DISPATCH_BURST);
+               for (j = 0; j < nb_rx; j++) {
+                       src_vsi = *CPFL_MBUF_SOURCE_METADATA(pkts_burst[j]);
+                       /* Get the repr according to source vsi */
+                       repr = cpfl_get_repr_by_vsi(adapter, src_vsi);
+                       if (unlikely(!repr)) {
+                               rte_pktmbuf_free(pkts_burst[j]);
+                               continue;
+                       }
+                       dev_data = (struct rte_eth_dev_data *)repr->itf.data;
+                       if (unlikely(!dev_data->dev_started || !dev_data->rx_queue_state[0])) {
+                               rte_pktmbuf_free(pkts_burst[j]);
+                               continue;
+                       }
+                       repr_rxq = (struct cpfl_repr_rx_queue *)
+                               (((struct rte_eth_dev_data *)repr->itf.data)->rx_queues[0]);
+                       if (unlikely(!repr_rxq || !repr_rxq->rx_ring)) {
+                               rte_pktmbuf_free(pkts_burst[j]);
+                               continue;
+                       }
+                       nb_enq = rte_ring_enqueue_bulk(repr_rxq->rx_ring,
+                                                      (void *)&pkts_burst[j], 1, NULL);
+                       if (!nb_enq) /* enqueue failed, just free it */
+                               rte_pktmbuf_free(pkts_burst[j]);
+               }
+       }
+       return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 882efe04cf..a931b5ec12 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1412,6 +1412,137 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
        }
 }
 
+int cpfl_dynfield_source_metadata_offset = -1;
+
+uint16_t
+cpfl_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+                     uint16_t nb_pkts)
+{
+       volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc_ring;
+       volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+       uint16_t pktlen_gen_bufq_id;
+       struct idpf_rx_queue *rxq;
+       const uint32_t *ptype_tbl;
+       uint8_t status_err0_qw1;
+       struct idpf_adapter *ad;
+       struct rte_mbuf *rxm;
+       uint16_t rx_id_bufq1;
+       uint16_t rx_id_bufq2;
+       uint64_t pkt_flags;
+       uint16_t pkt_len;
+       uint16_t bufq_id;
+       uint16_t gen_id;
+       uint16_t rx_id;
+       uint16_t nb_rx;
+       uint64_t ts_ns;
+
+       nb_rx = 0;
+       rxq = rx_queue;
+
+       if (unlikely(rxq == NULL) || unlikely(!rxq->q_started))
+               return nb_rx;
+       ad = rxq->adapter;
+
+       rx_id = rxq->rx_tail;
+       rx_id_bufq1 = rxq->bufq1->rx_next_avail;
+       rx_id_bufq2 = rxq->bufq2->rx_next_avail;
+       rx_desc_ring = rxq->rx_ring;
+       ptype_tbl = rxq->adapter->ptype_tbl;
+
+       if ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0)
+               rxq->hw_register_set = 1;
+
+       while (nb_rx < nb_pkts) {
+               rx_desc = &rx_desc_ring[rx_id];
+
+               pktlen_gen_bufq_id =
+                       rte_le_to_cpu_16(rx_desc->pktlen_gen_bufq_id);
+               gen_id = (pktlen_gen_bufq_id &
+                         VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) >>
+                       VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S;
+               if (gen_id != rxq->expected_gen_id)
+                       break;
+
+               pkt_len = (pktlen_gen_bufq_id &
+                          VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M) >>
+                       VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S;
+               if (pkt_len == 0)
+                       RX_LOG(ERR, "Packet length is 0");
+
+               rx_id++;
+               if (unlikely(rx_id == rxq->nb_rx_desc)) {
+                       rx_id = 0;
+                       rxq->expected_gen_id ^= 1;
+               }
+
+               bufq_id = (pktlen_gen_bufq_id &
+                          VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M) >>
+                       VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S;
+               if (bufq_id == 0) {
+                       rxm = rxq->bufq1->sw_ring[rx_id_bufq1];
+                       rx_id_bufq1++;
+                       if (unlikely(rx_id_bufq1 == rxq->bufq1->nb_rx_desc))
+                               rx_id_bufq1 = 0;
+                       rxq->bufq1->nb_rx_hold++;
+               } else {
+                       rxm = rxq->bufq2->sw_ring[rx_id_bufq2];
+                       rx_id_bufq2++;
+                       if (unlikely(rx_id_bufq2 == rxq->bufq2->nb_rx_desc))
+                               rx_id_bufq2 = 0;
+                       rxq->bufq2->nb_rx_hold++;
+               }
+
+               rxm->pkt_len = pkt_len;
+               rxm->data_len = pkt_len;
+               rxm->data_off = RTE_PKTMBUF_HEADROOM;
+               rxm->next = NULL;
+               rxm->nb_segs = 1;
+               rxm->port = rxq->port_id;
+               rxm->ol_flags = 0;
+               rxm->packet_type =
+                       ptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) &
+                                  VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>
+                                 VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];
+
+               status_err0_qw1 = rx_desc->status_err0_qw1;
+               pkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1);
+               pkt_flags |= idpf_splitq_rx_rss_offload(rxm, rx_desc);
+               if (idpf_timestamp_dynflag > 0 &&
+                   (rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP)) {
+                       /* timestamp */
+                       ts_ns = idpf_tstamp_convert_32b_64b(ad,
+                                                           
rxq->hw_register_set,
+                                                           
rte_le_to_cpu_32(rx_desc->ts_high));
+                       rxq->hw_register_set = 0;
+                       *RTE_MBUF_DYNFIELD(rxm,
+                                          idpf_timestamp_dynfield_offset,
+                                          rte_mbuf_timestamp_t *) = ts_ns;
+                       rxm->ol_flags |= idpf_timestamp_dynflag;
+               }
+
+               if (likely(cpfl_dynfield_source_metadata_offset != -1))
+                       *CPFL_MBUF_SOURCE_METADATA(rxm) =
+                               rte_le_to_cpu_16(rx_desc->fmd4);
+
+               rxm->ol_flags |= pkt_flags;
+
+               rx_pkts[nb_rx++] = rxm;
+       }
+
+       if (nb_rx > 0) {
+               rxq->rx_tail = rx_id;
+               if (rx_id_bufq1 != rxq->bufq1->rx_next_avail)
+                       rxq->bufq1->rx_next_avail = rx_id_bufq1;
+               if (rx_id_bufq2 != rxq->bufq2->rx_next_avail)
+                       rxq->bufq2->rx_next_avail = rx_id_bufq2;
+
+               idpf_split_rx_bufq_refill(rxq->bufq1);
+               idpf_split_rx_bufq_refill(rxq->bufq2);
+       }
+
+       return nb_rx;
+}
+
 static inline void
 cpfl_set_tx_switch_ctx(uint16_t vsi_id, bool is_vsi,
                       volatile union idpf_flex_tx_ctx_desc *ctx_desc)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 463ab73323..39e5e115d6 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -81,6 +81,11 @@ cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
        return start_qid + offset;
 }
 
+extern int cpfl_dynfield_source_metadata_offset;
+
+#define CPFL_MBUF_SOURCE_METADATA(m)                                   \
+       RTE_MBUF_DYNFIELD((m), cpfl_dynfield_source_metadata_offset, uint16_t *)
+
 static inline uint64_t
 cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
 {
@@ -128,4 +133,7 @@ uint16_t cpfl_dummy_xmit_pkts(void *queue,
 uint16_t cpfl_xmit_pkts_to_vsi(struct cpfl_tx_queue *txq,
                               struct rte_mbuf **tx_pkts,
                               uint16_t nb_pkts, uint16_t vsi_id);
+uint16_t cpfl_splitq_recv_pkts(void *rx_queue,
+                              struct rte_mbuf **rx_pkts,
+                              uint16_t nb_pkts);
 #endif /* _CPFL_RXTX_H_ */
-- 
2.34.1
