This patch adds some structures for the hairpin queue:
cpfl_rx_queue/cpfl_tx_queue/cpfl_vport.

Signed-off-by: Mingxia Liu <mingxia....@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c          | 102 +++++++-----
 drivers/net/cpfl/cpfl_ethdev.h          |   8 +-
 drivers/net/cpfl/cpfl_rxtx.c            | 196 +++++++++++++++++-------
 drivers/net/cpfl/cpfl_rxtx.h            |  28 ++++
 drivers/net/cpfl/cpfl_rxtx_vec_common.h |  18 ++-
 5 files changed, 255 insertions(+), 97 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 543dbd60f0..f799707ea7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -108,7 +108,9 @@ static int
 cpfl_dev_link_update(struct rte_eth_dev *dev,
                     __rte_unused int wait_to_complete)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct rte_eth_link new_link;
 
        memset(&new_link, 0, sizeof(new_link));
@@ -160,7 +162,9 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
 static int
 cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct idpf_adapter *adapter = vport->adapter;
 
        dev_info->max_rx_queues = adapter->caps.max_rx_q;
@@ -220,7 +224,9 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
 static int
 cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
 
        /* mtu setting is forbidden if port is start */
        if (dev->data->dev_started) {
@@ -260,12 +266,12 @@ static uint64_t
 cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
 {
        uint64_t mbuf_alloc_failed = 0;
-       struct idpf_rx_queue *rxq;
+       struct cpfl_rx_queue *cpfl_rxq;
        int i = 0;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
-               mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+               cpfl_rxq = dev->data->rx_queues[i];
+               mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
                                                     __ATOMIC_RELAXED);
        }
 
@@ -275,8 +281,9 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
 static int
 cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-       struct idpf_vport *vport =
-               (struct idpf_vport *)dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct virtchnl2_vport_stats *pstats = NULL;
        int ret;
 
@@ -308,20 +315,21 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct 
rte_eth_stats *stats)
 static void
 cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
 {
-       struct idpf_rx_queue *rxq;
+       struct cpfl_rx_queue *cpfl_rxq;
        int i;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
-               __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+               cpfl_rxq = dev->data->rx_queues[i];
+               __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
        }
 }
 
 static int
 cpfl_dev_stats_reset(struct rte_eth_dev *dev)
 {
-       struct idpf_vport *vport =
-               (struct idpf_vport *)dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct virtchnl2_vport_stats *pstats = NULL;
        int ret;
 
@@ -346,8 +354,9 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
 static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
                               struct rte_eth_xstat *xstats, unsigned int n)
 {
-       struct idpf_vport *vport =
-               (struct idpf_vport *)dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct virtchnl2_vport_stats *pstats = NULL;
        unsigned int i;
        int ret;
@@ -461,7 +470,9 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
                     struct rte_eth_rss_reta_entry64 *reta_conf,
                     uint16_t reta_size)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct idpf_adapter *adapter = vport->adapter;
        uint16_t idx, shift;
        int ret = 0;
@@ -500,7 +511,9 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,
                    struct rte_eth_rss_reta_entry64 *reta_conf,
                    uint16_t reta_size)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct idpf_adapter *adapter = vport->adapter;
        uint16_t idx, shift;
        int ret = 0;
@@ -538,7 +551,9 @@ static int
 cpfl_rss_hash_update(struct rte_eth_dev *dev,
                     struct rte_eth_rss_conf *rss_conf)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct idpf_adapter *adapter = vport->adapter;
        int ret = 0;
 
@@ -603,7 +618,9 @@ static int
 cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
                       struct rte_eth_rss_conf *rss_conf)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct idpf_adapter *adapter = vport->adapter;
        int ret = 0;
 
@@ -640,7 +657,9 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
 static int
 cpfl_dev_configure(struct rte_eth_dev *dev)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct rte_eth_conf *conf = &dev->data->dev_conf;
        struct idpf_adapter *adapter = vport->adapter;
        int ret;
@@ -703,7 +722,9 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
 static int
 cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        uint16_t nb_rx_queues = dev->data->nb_rx_queues;
 
        return idpf_vport_irq_map_config(vport, nb_rx_queues);
@@ -712,15 +733,16 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
 static int
 cpfl_start_queues(struct rte_eth_dev *dev)
 {
-       struct idpf_rx_queue *rxq;
-       struct idpf_tx_queue *txq;
+       struct cpfl_rx_queue *cpfl_rxq;
+       struct cpfl_tx_queue *cpfl_txq;
        int err = 0;
        int i;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txq = dev->data->tx_queues[i];
-               if (txq == NULL || txq->tx_deferred_start)
+               cpfl_txq = dev->data->tx_queues[i];
+               if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
                        continue;
+
                err = cpfl_tx_queue_start(dev, i);
                if (err != 0) {
                        PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
@@ -729,8 +751,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
        }
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
-               if (rxq == NULL || rxq->rx_deferred_start)
+               cpfl_rxq = dev->data->rx_queues[i];
+               if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
                        continue;
                err = cpfl_rx_queue_start(dev, i);
                if (err != 0) {
@@ -745,7 +767,9 @@ cpfl_start_queues(struct rte_eth_dev *dev)
 static int
 cpfl_dev_start(struct rte_eth_dev *dev)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct idpf_adapter *base = vport->adapter;
        struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);
        uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
@@ -808,7 +832,9 @@ cpfl_dev_start(struct rte_eth_dev *dev)
 static int
 cpfl_dev_stop(struct rte_eth_dev *dev)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
 
        if (vport->stopped == 1)
                return 0;
@@ -829,7 +855,9 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 static int
 cpfl_dev_close(struct rte_eth_dev *dev)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
 
        cpfl_dev_stop(dev);
@@ -839,7 +867,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
        adapter->cur_vport_nb--;
        dev->data->dev_private = NULL;
        adapter->vports[vport->sw_idx] = NULL;
-       rte_free(vport);
+       rte_free(cpfl_vport);
 
        return 0;
 }
@@ -1012,7 +1040,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, 
uint32_t vport_id)
        int i;
 
        for (i = 0; i < adapter->cur_vport_nb; i++) {
-               vport = adapter->vports[i];
+               vport = &(adapter->vports[i]->base);
                if (vport->vport_id != vport_id)
                        continue;
                else
@@ -1225,7 +1253,9 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *ad)
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct cpfl_vport_param *param = init_params;
        struct cpfl_adapter_ext *adapter = param->adapter;
        /* for sending create vport virtchnl msg prepare */
@@ -1251,7 +1281,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void 
*init_params)
                goto err;
        }
 
-       adapter->vports[param->idx] = vport;
+       adapter->vports[param->idx] = cpfl_vport;
        adapter->cur_vports |= RTE_BIT32(param->devarg_id);
        adapter->cur_vport_nb++;
 
@@ -1369,7 +1399,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv 
__rte_unused,
                snprintf(name, sizeof(name), "cpfl_%s_vport_0",
                         pci_dev->device.name);
                retval = rte_eth_dev_create(&pci_dev->device, name,
-                                           sizeof(struct idpf_vport),
+                                           sizeof(struct cpfl_vport),
                                            NULL, NULL, cpfl_dev_vport_init,
                                            &vport_param);
                if (retval != 0)
@@ -1387,7 +1417,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv 
__rte_unused,
                                 pci_dev->device.name,
                                 devargs.req_vports[i]);
                        retval = rte_eth_dev_create(&pci_dev->device, name,
-                                                   sizeof(struct idpf_vport),
+                                                   sizeof(struct cpfl_vport),
+                                                   NULL, NULL, cpfl_dev_vport_init,
                                                    &vport_param);
                        if (retval != 0)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index e00dff4bf0..ef3225878b 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -70,13 +70,19 @@ struct cpfl_devargs {
        uint16_t req_vport_nb;
 };
 
+struct cpfl_vport {
+       /* p2p mbuf pool */
+       struct rte_mempool *p2p_mp;
+       struct idpf_vport base;
+};
+
 struct cpfl_adapter_ext {
        TAILQ_ENTRY(cpfl_adapter_ext) next;
        struct idpf_adapter base;
 
        char name[CPFL_ADAPTER_NAME_LEN];
 
-       struct idpf_vport **vports;
+       struct cpfl_vport **vports;
        uint16_t max_vport_nb;
 
        uint16_t cur_vports; /* bit mask of created vport */
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 6226b02301..c7e5487366 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,6 +10,11 @@
 #include "cpfl_rxtx.h"
 #include "cpfl_rxtx_vec_common.h"
 
+static void
+cpfl_tx_queue_release(void *txq);
+static void
+cpfl_rx_queue_release(void *rxq);
+
 static uint64_t
 cpfl_rx_offload_convert(uint64_t offload)
 {
@@ -128,7 +133,9 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct 
idpf_rx_queue *rxq,
                         uint16_t nb_desc, unsigned int socket_id,
                         struct rte_mempool *mp, uint8_t bufq_id)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct idpf_adapter *adapter = vport->adapter;
        struct idpf_hw *hw = &adapter->hw;
        const struct rte_memzone *mz;
@@ -225,9 +232,12 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t 
queue_idx,
                    const struct rte_eth_rxconf *rx_conf,
                    struct rte_mempool *mp)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct idpf_adapter *adapter = vport->adapter;
        struct idpf_hw *hw = &adapter->hw;
+       struct cpfl_rx_queue *cpfl_rxq;
        const struct rte_memzone *mz;
        struct idpf_rx_queue *rxq;
        uint16_t rx_free_thresh;
@@ -247,21 +257,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t 
queue_idx,
 
        /* Free memory if needed */
        if (dev->data->rx_queues[queue_idx] != NULL) {
-               idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
                dev->data->rx_queues[queue_idx] = NULL;
        }
 
        /* Setup Rx queue */
-       rxq = rte_zmalloc_socket("cpfl rxq",
-                                sizeof(struct idpf_rx_queue),
+       cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
+                                sizeof(struct cpfl_rx_queue),
                                 RTE_CACHE_LINE_SIZE,
                                 socket_id);
-       if (rxq == NULL) {
+       if (cpfl_rxq == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
                ret = -ENOMEM;
                goto err_rxq_alloc;
        }
 
+       rxq = &(cpfl_rxq->base);
+
        is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
 
        rxq->mp = mp;
@@ -328,7 +340,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t 
queue_idx,
        }
 
        rxq->q_set = true;
-       dev->data->rx_queues[queue_idx] = rxq;
+       dev->data->rx_queues[queue_idx] = cpfl_rxq;
 
        return 0;
 
@@ -348,7 +360,9 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct 
idpf_tx_queue *txq,
                     uint16_t queue_idx, uint16_t nb_desc,
                     unsigned int socket_id)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        const struct rte_memzone *mz;
        struct idpf_tx_queue *cq;
        int ret;
@@ -396,15 +410,18 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t 
queue_idx,
                    uint16_t nb_desc, unsigned int socket_id,
                    const struct rte_eth_txconf *tx_conf)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
        struct idpf_adapter *adapter = vport->adapter;
        uint16_t tx_rs_thresh, tx_free_thresh;
        struct idpf_hw *hw = &adapter->hw;
+       struct cpfl_tx_queue *cpfl_txq;
        const struct rte_memzone *mz;
        struct idpf_tx_queue *txq;
        uint64_t offloads;
-       uint16_t len;
        bool is_splitq;
+       uint16_t len;
        int ret;
 
        offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
@@ -418,21 +435,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t 
queue_idx,
 
        /* Free memory if needed. */
        if (dev->data->tx_queues[queue_idx] != NULL) {
-               idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
                dev->data->tx_queues[queue_idx] = NULL;
        }
 
        /* Allocate the TX queue data structure. */
-       txq = rte_zmalloc_socket("cpfl txq",
-                                sizeof(struct idpf_tx_queue),
+       cpfl_txq = rte_zmalloc_socket("cpfl txq",
+                                sizeof(struct cpfl_tx_queue),
                                 RTE_CACHE_LINE_SIZE,
                                 socket_id);
-       if (txq == NULL) {
+       if (cpfl_txq == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
                ret = -ENOMEM;
                goto err_txq_alloc;
        }
 
+       txq = &(cpfl_txq->base);
+
        is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
 
        txq->nb_tx_desc = nb_desc;
@@ -486,7 +505,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t 
queue_idx,
                        queue_idx * vport->chunks_info.tx_qtail_spacing);
        txq->ops = &def_txq_ops;
        txq->q_set = true;
-       dev->data->tx_queues[queue_idx] = txq;
+       dev->data->tx_queues[queue_idx] = cpfl_txq;
 
        return 0;
 
@@ -502,6 +521,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t 
queue_idx,
 int
 cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
+       struct cpfl_rx_queue *cpfl_rxq;
        struct idpf_rx_queue *rxq;
        uint16_t max_pkt_len;
        uint32_t frame_size;
@@ -510,7 +530,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t 
rx_queue_id)
        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -EINVAL;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = &(cpfl_rxq->base);
 
        if (rxq == NULL || !rxq->q_set) {
                PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
@@ -574,9 +595,11 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t 
rx_queue_id)
 int
 cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
-       struct idpf_rx_queue *rxq =
-               dev->data->rx_queues[rx_queue_id];
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
+       struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+       struct idpf_rx_queue *rxq = &(cpfl_rxq->base);
        int err = 0;
 
        err = idpf_vc_rxq_config(vport, rxq);
@@ -609,15 +632,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t 
rx_queue_id)
 int
 cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       struct idpf_tx_queue *txq;
+       struct cpfl_tx_queue *cpfl_txq;
 
        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -EINVAL;
 
-       txq = dev->data->tx_queues[tx_queue_id];
+       cpfl_txq = dev->data->tx_queues[tx_queue_id];
 
        /* Init the RX tail register. */
-       IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+       IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
 
        return 0;
 }
@@ -625,12 +648,14 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t 
tx_queue_id)
 int
 cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
-       struct idpf_tx_queue *txq =
-               dev->data->tx_queues[tx_queue_id];
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
+       struct cpfl_tx_queue *cpfl_txq =
+           dev->data->tx_queues[tx_queue_id];
        int err = 0;
 
-       err = idpf_vc_txq_config(vport, txq);
+       err = idpf_vc_txq_config(vport, &(cpfl_txq->base));
        if (err != 0) {
                PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
                return err;
@@ -649,7 +674,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t 
tx_queue_id)
                PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
                            tx_queue_id);
        } else {
-               txq->q_started = true;
+               cpfl_txq->base.q_started = true;
                dev->data->tx_queue_state[tx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STARTED;
        }
@@ -660,13 +685,17 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t 
tx_queue_id)
 int
 cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
+       struct cpfl_rx_queue *cpfl_rxq;
        struct idpf_rx_queue *rxq;
        int err;
 
        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -EINVAL;
 
+       cpfl_rxq = dev->data->rx_queues[rx_queue_id];
        err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
        if (err != 0) {
                PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -674,7 +703,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t 
rx_queue_id)
                return err;
        }
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = &(cpfl_rxq->base);
        if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
                rxq->ops->release_mbufs(rxq);
                idpf_qc_single_rx_queue_reset(rxq);
@@ -691,13 +720,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t 
rx_queue_id)
 int
 cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
+       struct cpfl_tx_queue *cpfl_txq;
        struct idpf_tx_queue *txq;
        int err;
 
        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -EINVAL;
 
+       cpfl_txq = dev->data->tx_queues[tx_queue_id];
        err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
        if (err != 0) {
                PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -705,7 +738,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t 
tx_queue_id)
                return err;
        }
 
-       txq = dev->data->tx_queues[tx_queue_id];
+       txq = &(cpfl_txq->base);
        txq->ops->release_mbufs(txq);
        if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
                idpf_qc_single_tx_queue_reset(txq);
@@ -718,28 +751,83 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t 
tx_queue_id)
        return 0;
 }
 
+static void
+cpfl_rx_queue_release(void *rxq)
+{
+       struct cpfl_rx_queue *cpfl_rxq = rxq;
+       struct idpf_rx_queue *q = NULL;
+
+       if (cpfl_rxq == NULL)
+               return;
+
+       q = &(cpfl_rxq->base);
+
+       /* Split queue */
+       if (q->bufq1 != NULL && q->bufq2 != NULL) {
+               q->bufq1->ops->release_mbufs(q->bufq1);
+               rte_free(q->bufq1->sw_ring);
+               rte_memzone_free(q->bufq1->mz);
+               rte_free(q->bufq1);
+               q->bufq2->ops->release_mbufs(q->bufq2);
+               rte_free(q->bufq2->sw_ring);
+               rte_memzone_free(q->bufq2->mz);
+               rte_free(q->bufq2);
+               rte_memzone_free(q->mz);
+               rte_free(cpfl_rxq);
+               return;
+       }
+
+       /* Single queue */
+       q->ops->release_mbufs(q);
+       rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
+       rte_free(cpfl_rxq);
+}
+
 void
 cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
+       cpfl_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+static void
+cpfl_tx_queue_release(void *txq)
+{
+       struct cpfl_tx_queue *cpfl_txq = txq;
+       struct idpf_tx_queue *q = NULL;
+
+       if (cpfl_txq == NULL)
+               return;
+
+       q = &(cpfl_txq->base);
+
+       if (q->complq) {
+               rte_memzone_free(q->complq->mz);
+               rte_free(q->complq);
+       }
+
+       q->ops->release_mbufs(q);
+       rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
+       rte_free(cpfl_txq);
 }
 
 void
 cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
+       cpfl_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 void
 cpfl_stop_queues(struct rte_eth_dev *dev)
 {
-       struct idpf_rx_queue *rxq;
-       struct idpf_tx_queue *txq;
+       struct cpfl_rx_queue *cpfl_rxq;
+       struct cpfl_tx_queue *cpfl_txq;
        int i;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
-               if (rxq == NULL)
+               cpfl_rxq = dev->data->rx_queues[i];
+               if (cpfl_rxq == NULL)
                        continue;
 
                if (cpfl_rx_queue_stop(dev, i) != 0)
@@ -747,8 +835,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
        }
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txq = dev->data->tx_queues[i];
-               if (txq == NULL)
+               cpfl_txq = dev->data->tx_queues[i];
+               if (cpfl_txq == NULL)
                        continue;
 
                if (cpfl_tx_queue_stop(dev, i) != 0)
@@ -760,9 +848,11 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
 void
 cpfl_set_rx_function(struct rte_eth_dev *dev)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
 #ifdef RTE_ARCH_X86
-       struct idpf_rx_queue *rxq;
+       struct cpfl_rx_queue *cpfl_rxq;
        int i;
 
        if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
@@ -788,8 +878,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
        if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
                if (vport->rx_vec_allowed) {
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                               rxq = dev->data->rx_queues[i];
-                               (void)idpf_qc_splitq_rx_vec_setup(rxq);
+                               cpfl_rxq = dev->data->rx_queues[i];
+                               (void)idpf_qc_splitq_rx_vec_setup(&(cpfl_rxq->base));
                        }
 #ifdef CC_AVX512_SUPPORT
                        if (vport->rx_use_avx512) {
@@ -808,8 +898,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
        } else {
                if (vport->rx_vec_allowed) {
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                               rxq = dev->data->rx_queues[i];
-                               (void)idpf_qc_singleq_rx_vec_setup(rxq);
+                               cpfl_rxq = dev->data->rx_queues[i];
+                               (void)idpf_qc_singleq_rx_vec_setup(&(cpfl_rxq->base));
                        }
 #ifdef CC_AVX512_SUPPORT
                        if (vport->rx_use_avx512) {
@@ -858,10 +948,12 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
 void
 cpfl_set_tx_function(struct rte_eth_dev *dev)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
-       struct idpf_tx_queue *txq;
+       struct cpfl_tx_queue *cpfl_txq;
        int i;
 #endif /* CC_AVX512_SUPPORT */
 
@@ -876,8 +968,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
                                vport->tx_use_avx512 = true;
                        if (vport->tx_use_avx512) {
                                for (i = 0; i < dev->data->nb_tx_queues; i++) {
-                                       txq = dev->data->tx_queues[i];
-                                       idpf_qc_tx_vec_avx512_setup(txq);
+                                       cpfl_txq = dev->data->tx_queues[i];
+                                       idpf_qc_tx_vec_avx512_setup(&(cpfl_txq->base));
                                }
                        }
                }
@@ -914,10 +1006,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
 #ifdef CC_AVX512_SUPPORT
                        if (vport->tx_use_avx512) {
                                for (i = 0; i < dev->data->nb_tx_queues; i++) {
-                                       txq = dev->data->tx_queues[i];
-                                       if (txq == NULL)
+                                       cpfl_txq = dev->data->tx_queues[i];
+                                       if (cpfl_txq == NULL)
                                                continue;
-                                       idpf_qc_tx_vec_avx512_setup(txq);
+                                       idpf_qc_tx_vec_avx512_setup(&(cpfl_txq->base));
                                }
                                PMD_DRV_LOG(NOTICE,
                                            "Using Single AVX512 Vector Tx (port %d).",
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index fb267d38c8..e241afece9 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -23,6 +23,34 @@
 
 #define CPFL_SUPPORT_CHAIN_NUM 5
 
+struct cpfl_rxq_hairpin_info {
+       bool hairpin_q;         /* if rx queue is a hairpin queue */
+       /* only valid if the hairpin queue pair crosses vport */
+       bool hairpin_cv;
+       uint16_t peer_txp;
+};
+
+struct cpfl_rx_queue {
+       struct idpf_rx_queue base;
+       struct cpfl_rxq_hairpin_info hairpin_info;
+};
+
+struct cpfl_txq_hairpin_info {
+       /* only valid for hairpin queue */
+       bool hairpin_q;
+       /* only valid if the hairpin queue pair crosses vport */
+       bool hairpin_cv;
+       uint16_t peer_rxq_id;
+       uint16_t peer_rxp;
+       bool bound;
+       uint16_t complq_peer_rxq_id;
+};
+
+struct cpfl_tx_queue {
+       struct idpf_tx_queue base;
+       struct cpfl_txq_hairpin_info hairpin_info;
+};
+
 int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                        uint16_t nb_desc, unsigned int socket_id,
                        const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 665418d27d..8d0b825f95 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -76,15 +76,17 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
 static inline int
 cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
-       struct idpf_vport *vport = dev->data->dev_private;
-       struct idpf_rx_queue *rxq;
+       struct cpfl_vport *cpfl_vport =
+           (struct cpfl_vport *)dev->data->dev_private;
+       struct idpf_vport *vport = &(cpfl_vport->base);
+       struct cpfl_rx_queue *cpfl_rxq;
        int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
-               default_ret = cpfl_rx_vec_queue_default(rxq);
+               cpfl_rxq = dev->data->rx_queues[i];
+               default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
                if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
-                       splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+                       splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
                        ret = splitq_ret && default_ret;
                } else {
                        ret = default_ret;
@@ -100,12 +102,12 @@ static inline int
 cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
        int i;
-       struct idpf_tx_queue *txq;
+       struct cpfl_tx_queue *cpfl_txq;
        int ret = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txq = dev->data->tx_queues[i];
-               ret = cpfl_tx_vec_queue_default(txq);
+               cpfl_txq = dev->data->tx_queues[i];
+               ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
                if (ret == CPFL_SCALAR_PATH)
                        return CPFL_SCALAR_PATH;
        }
-- 
2.25.1

Reply via email to