Implement ethernet device operation callbacks for port representor PMD

Implement the ethdev operation callbacks for representor ports:
device configure, device/representor info get, link update,
start/stop/close, Rx/Tx queue setup/stop/release, promiscuous mode
enable/disable and MAC address set. Setting the MAC address is relayed
to the representee via a new CNXK_REP_MSG_ETH_SET_MAC control message.
The real Rx/Tx burst handlers are installed on device start; dummy
handlers remain in place until then to avoid crashes.

Signed-off-by: Harman Kalra <hka...@marvell.com>
---
 drivers/net/cnxk/cnxk_rep.c     |  28 +-
 drivers/net/cnxk/cnxk_rep.h     |  35 +++
 drivers/net/cnxk/cnxk_rep_msg.h |   8 +
 drivers/net/cnxk/cnxk_rep_ops.c | 495 ++++++++++++++++++++++++++++++--
 4 files changed, 523 insertions(+), 43 deletions(-)

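For context, a minimal application-side sketch of how these new callbacks
are exercised through the public ethdev API once a representor port is
probed. The helper name, port id, descriptor counts and mempool sizing
below are illustrative only, not part of this patch:

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

/* Hypothetical helper: configure a representor port with the single
 * Rx/Tx queue pair this PMD supports, start it and program its MAC
 * address (relayed to the representee via CNXK_REP_MSG_ETH_SET_MAC).
 */
static int
rep_port_bringup(uint16_t port_id, struct rte_ether_addr *mac)
{
	struct rte_eth_conf conf = {0}; /* MQ_RX_NONE/MQ_TX_NONE, accepted by rep_eth_conf_chk */
	struct rte_mempool *mp;
	int rc;

	mp = rte_pktmbuf_pool_create("rep_pool", 1024, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mp == NULL)
		return -rte_errno;

	rc = rte_eth_dev_configure(port_id, 1, 1, &conf); /* cnxk_rep_dev_configure */
	if (rc < 0)
		return rc;

	rc = rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL, mp);
	if (rc < 0)
		return rc;

	rc = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
	if (rc < 0)
		return rc;

	/* dev_start swaps the dummy burst handlers for the real ones */
	rc = rte_eth_dev_start(port_id);
	if (rc < 0)
		return rc;

	return rte_eth_dev_default_mac_addr_set(port_id, mac); /* cnxk_rep_mac_addr_set */
}
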
diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c
index dc00cdecc1..ca0637bde5 100644
--- a/drivers/net/cnxk/cnxk_rep.c
+++ b/drivers/net/cnxk/cnxk_rep.c
@@ -73,6 +73,8 @@ cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, uint16_t *rep_id)
 int
 cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev)
 {
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
@@ -80,6 +82,8 @@ cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev)
        rte_free(ethdev->data->mac_addrs);
        ethdev->data->mac_addrs = NULL;
 
+       rep_dev->parent_dev->repr_cnt.nb_repr_probed--;
+
        return 0;
 }
 
@@ -432,26 +436,6 @@ cnxk_rep_parent_setup(struct cnxk_eswitch_dev *eswitch_dev)
        return rc;
 }
 
-static uint16_t
-cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
-       PLT_SET_USED(tx_queue);
-       PLT_SET_USED(tx_pkts);
-       PLT_SET_USED(nb_pkts);
-
-       return 0;
-}
-
-static uint16_t
-cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
-       PLT_SET_USED(rx_queue);
-       PLT_SET_USED(rx_pkts);
-       PLT_SET_USED(nb_pkts);
-
-       return 0;
-}
-
 static int
 cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
 {
@@ -481,8 +465,8 @@ cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
        eth_dev->dev_ops = &cnxk_rep_dev_ops;
 
        /* Rx/Tx functions stubs to avoid crashing */
-       eth_dev->rx_pkt_burst = cnxk_rep_rx_burst;
-       eth_dev->tx_pkt_burst = cnxk_rep_tx_burst;
+       eth_dev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;
+       eth_dev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;
 
        /* Only single queues for representor devices */
        eth_dev->data->nb_rx_queues = 1;
diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h
index 5a85d4376e..6a43259980 100644
--- a/drivers/net/cnxk/cnxk_rep.h
+++ b/drivers/net/cnxk/cnxk_rep.h
@@ -7,6 +7,13 @@
 #ifndef __CNXK_REP_H__
 #define __CNXK_REP_H__
 
+#define CNXK_REP_TX_OFFLOAD_CAPA                                                                   \
+       (RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_VLAN_INSERT |                      \
+        RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define CNXK_REP_RX_OFFLOAD_CAPA                                                                   \
+       (RTE_ETH_RX_OFFLOAD_SCATTER | RTE_ETH_RX_OFFLOAD_RSS_HASH | RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+
 /* Common ethdev ops */
 extern struct eth_dev_ops cnxk_rep_dev_ops;
 
@@ -58,12 +65,33 @@ struct cnxk_rep_dev {
        uint16_t repte_mtu;
 };
 
+/* Inline functions */
+static inline void
+cnxk_rep_lock(struct cnxk_rep_dev *rep)
+{
+       rte_spinlock_lock(&rep->parent_dev->rep_lock);
+}
+
+static inline void
+cnxk_rep_unlock(struct cnxk_rep_dev *rep)
+{
+       rte_spinlock_unlock(&rep->parent_dev->rep_lock);
+}
+
 static inline struct cnxk_rep_dev *
 cnxk_rep_pmd_priv(const struct rte_eth_dev *eth_dev)
 {
        return eth_dev->data->dev_private;
 }
 
+static __rte_always_inline void
+cnxk_rep_pool_buffer_stats(struct rte_mempool *pool)
+{
+       plt_rep_dbg("        pool %s size %d buffer count in use %d available %d", pool->name,
+                   pool->size, rte_mempool_in_use_count(pool), rte_mempool_avail_count(pool));
+}
+
+/* Prototypes */
 int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev);
 int cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev);
 int cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev);
@@ -86,5 +114,12 @@ int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
 int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev);
 int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops);
 int cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, uint16_t *rep_id);
+int cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev);
+int cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev);
+int cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr);
+uint16_t cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+uint16_t cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+void cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id);
+void cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id);
 
 #endif /* __CNXK_REP_H__ */
diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h
index 0543805148..63cfbe3f19 100644
--- a/drivers/net/cnxk/cnxk_rep_msg.h
+++ b/drivers/net/cnxk/cnxk_rep_msg.h
@@ -19,6 +19,8 @@ typedef enum CNXK_REP_MSG {
        CNXK_REP_MSG_READY = 0,
        CNXK_REP_MSG_ACK,
        CNXK_REP_MSG_EXIT,
+       /* Ethernet operation msgs */
+       CNXK_REP_MSG_ETH_SET_MAC,
        /* End of messaging sequence */
        CNXK_REP_MSG_END,
 } cnxk_rep_msg_t;
@@ -81,6 +83,12 @@ typedef struct cnxk_rep_msg_exit_data {
        uint16_t data[];
 } __rte_packed cnxk_rep_msg_exit_data_t;
 
+/* Ethernet op - set mac */
+typedef struct cnxk_rep_msg_eth_set_mac_meta {
+       uint16_t portid;
+       uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
+} __rte_packed cnxk_rep_msg_eth_set_mac_meta_t;
+
 void cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, cnxk_rep_msg_t type,
                                    uint32_t size);
 void cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void *msg_meta, uint32_t sz,
diff --git a/drivers/net/cnxk/cnxk_rep_ops.c b/drivers/net/cnxk/cnxk_rep_ops.c
index 15448688ce..97643a50f2 100644
--- a/drivers/net/cnxk/cnxk_rep_ops.c
+++ b/drivers/net/cnxk/cnxk_rep_ops.c
@@ -3,25 +3,221 @@
  */
 
 #include <cnxk_rep.h>
+#include <cnxk_rep_msg.h>
+
+#define MEMPOOL_CACHE_SIZE 256
+#define TX_DESC_PER_QUEUE  512
+#define RX_DESC_PER_QUEUE  256
+#define NB_REP_VDEV_MBUF   1024
+
+static uint16_t
+cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       struct cnxk_rep_txq *txq = tx_queue;
+       struct cnxk_rep_dev *rep_dev;
+       uint16_t n_tx;
+
+       if (unlikely(!txq))
+               return 0;
+
+       rep_dev = txq->rep_dev;
+       plt_rep_dbg("Transmitting %d packets on eswitch queue %d", nb_pkts, txq->qid);
+       n_tx = cnxk_eswitch_dev_tx_burst(rep_dev->parent_dev, txq->qid, tx_pkts, nb_pkts,
+                                        NIX_TX_OFFLOAD_VLAN_QINQ_F);
+       return n_tx;
+}
+
+static uint16_t
+cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       struct cnxk_rep_rxq *rxq = rx_queue;
+       struct cnxk_rep_dev *rep_dev;
+       uint16_t n_rx;
+
+       if (unlikely(!rxq))
+               return 0;
+
+       rep_dev = rxq->rep_dev;
+       n_rx = cnxk_eswitch_dev_rx_burst(rep_dev->parent_dev, rxq->qid, rx_pkts, nb_pkts);
+       if (n_rx == 0)
+               return 0;
+
+       plt_rep_dbg("Received %d packets on eswitch queue %d", n_rx, rxq->qid);
+       return n_rx;
+}
+
+uint16_t
+cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       PLT_SET_USED(tx_queue);
+       PLT_SET_USED(tx_pkts);
+       PLT_SET_USED(nb_pkts);
+
+       return 0;
+}
+
+uint16_t
+cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       PLT_SET_USED(rx_queue);
+       PLT_SET_USED(rx_pkts);
+       PLT_SET_USED(nb_pkts);
+
+       return 0;
+}
 
 int
 cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 {
-       PLT_SET_USED(ethdev);
+       struct rte_eth_link link;
        PLT_SET_USED(wait_to_complete);
+
+       memset(&link, 0, sizeof(link));
+       if (ethdev->data->dev_started)
+               link.link_status = RTE_ETH_LINK_UP;
+       else
+               link.link_status = RTE_ETH_LINK_DOWN;
+
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+       link.link_autoneg = RTE_ETH_LINK_FIXED;
+       link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+
+       return rte_eth_linkstatus_set(ethdev, &link);
+}
+
+int
+cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *dev_info)
+{
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       uint32_t max_rx_pktlen;
+
+       max_rx_pktlen = (roc_nix_max_pkt_len(&rep_dev->parent_dev->nix) + RTE_ETHER_CRC_LEN -
+                        CNXK_NIX_MAX_VTAG_ACT_SIZE);
+
+       dev_info->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN;
+       dev_info->max_rx_pktlen = max_rx_pktlen;
+       dev_info->max_mac_addrs = roc_nix_mac_max_entries_get(&rep_dev->parent_dev->nix);
+
+       dev_info->rx_offload_capa = CNXK_REP_RX_OFFLOAD_CAPA;
+       dev_info->tx_offload_capa = CNXK_REP_TX_OFFLOAD_CAPA;
+       dev_info->rx_queue_offload_capa = 0;
+       dev_info->tx_queue_offload_capa = 0;
+
+       /* For the sake of symmetry, max_rx_queues = max_tx_queues */
+       dev_info->max_rx_queues = 1;
+       dev_info->max_tx_queues = 1;
+
+       /* MTU specifics */
+       dev_info->max_mtu = dev_info->max_rx_pktlen - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
+       dev_info->min_mtu = dev_info->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;
+
+       /* Switch info specific */
+       dev_info->switch_info.name = ethdev->device->name;
+       dev_info->switch_info.domain_id = rep_dev->switch_domain_id;
+       dev_info->switch_info.port_id = rep_dev->port_id;
+
        return 0;
 }
 
 int
-cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *devinfo)
+cnxk_rep_representor_info_get(struct rte_eth_dev *ethdev, struct rte_eth_representor_info *info)
+{
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+
+       return cnxk_eswitch_representor_info_get(rep_dev->parent_dev, info);
+}
+
+static int
+rep_eth_conf_chk(const struct rte_eth_conf *conf, uint16_t nb_rx_queues)
+{
+       const struct rte_eth_rss_conf *rss_conf;
+       int ret = 0;
+
+       if (conf->link_speeds != 0) {
+               plt_err("specific link speeds not supported");
+               ret = -EINVAL;
+       }
+
+       switch (conf->rxmode.mq_mode) {
+       case RTE_ETH_MQ_RX_RSS:
+               if (nb_rx_queues != 1) {
+                       plt_err("Rx RSS is not supported with %u queues", nb_rx_queues);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               rss_conf = &conf->rx_adv_conf.rss_conf;
+               if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
+                   rss_conf->rss_hf != 0) {
+                       plt_err("Rx RSS configuration is not supported");
+                       ret = -EINVAL;
+               }
+               break;
+       case RTE_ETH_MQ_RX_NONE:
+               break;
+       default:
+               plt_err("Rx mode MQ modes other than RSS not supported");
+               ret = -EINVAL;
+               break;
+       }
+
+       if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
+               plt_err("Tx mode MQ modes not supported");
+               ret = -EINVAL;
+       }
+
+       if (conf->lpbk_mode != 0) {
+               plt_err("loopback not supported");
+               ret = -EINVAL;
+       }
+
+       if (conf->dcb_capability_en != 0) {
+               plt_err("priority-based flow control not supported");
+               ret = -EINVAL;
+       }
+
+       if (conf->intr_conf.lsc != 0) {
+               plt_err("link status change interrupt not supported");
+               ret = -EINVAL;
+       }
+
+       if (conf->intr_conf.rxq != 0) {
+               plt_err("receive queue interrupt not supported");
+               ret = -EINVAL;
+       }
+
+       if (conf->intr_conf.rmv != 0) {
+               plt_err("remove interrupt not supported");
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+int
+cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
+{
+       struct rte_eth_dev_data *ethdev_data = ethdev->data;
+       int rc = -1;
+
+       rc = rep_eth_conf_chk(&ethdev_data->dev_conf, ethdev_data->nb_rx_queues);
+       if (rc)
+               goto fail;
+
+       return 0;
+fail:
+       return rc;
+}
+
+int
+cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev)
 {
        PLT_SET_USED(ethdev);
-       PLT_SET_USED(devinfo);
        return 0;
 }
 
 int
-cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
+cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev)
 {
        PLT_SET_USED(ethdev);
        return 0;
@@ -30,21 +226,73 @@ cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
 int
 cnxk_rep_dev_start(struct rte_eth_dev *ethdev)
 {
-       PLT_SET_USED(ethdev);
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       int rc = 0, qid;
+
+       ethdev->rx_pkt_burst = cnxk_rep_rx_burst;
+       ethdev->tx_pkt_burst = cnxk_rep_tx_burst;
+
+       if (!rep_dev->is_vf_active)
+               return 0;
+
+       if (!rep_dev->rxq || !rep_dev->txq) {
+               plt_err("Invalid rxq or txq for representor id %d", rep_dev->rep_id);
+               rc = -EINVAL;
+               goto fail;
+       }
+
+       /* Start rx queues */
+       qid = rep_dev->rxq->qid;
+       rc = cnxk_eswitch_rxq_start(rep_dev->parent_dev, qid);
+       if (rc) {
+               plt_err("Failed to start rxq %d, rc=%d", qid, rc);
+               goto fail;
+       }
+
+       /* Start tx queues */
+       qid = rep_dev->txq->qid;
+       rc = cnxk_eswitch_txq_start(rep_dev->parent_dev, qid);
+       if (rc) {
+               plt_err("Failed to start txq %d, rc=%d", qid, rc);
+               goto fail;
+       }
+
+       /* Start the rep_xport device only once, when the first representor becomes active */
+       if (!rep_dev->parent_dev->repr_cnt.nb_repr_started) {
+               rc = cnxk_eswitch_nix_rsrc_start(rep_dev->parent_dev);
+               if (rc) {
+                       plt_err("Failed to start nix dev, rc %d", rc);
+                       goto fail;
+               }
+       }
+
+       ethdev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
+       ethdev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
+
+       rep_dev->parent_dev->repr_cnt.nb_repr_started++;
+
        return 0;
+fail:
+       return rc;
 }
 
 int
 cnxk_rep_dev_close(struct rte_eth_dev *ethdev)
 {
-       PLT_SET_USED(ethdev);
-       return 0;
+       return cnxk_rep_dev_uninit(ethdev);
 }
 
 int
 cnxk_rep_dev_stop(struct rte_eth_dev *ethdev)
 {
-       PLT_SET_USED(ethdev);
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+
+       ethdev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;
+       ethdev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;
+       cnxk_rep_rx_queue_stop(ethdev, 0);
+       cnxk_rep_tx_queue_stop(ethdev, 0);
+       rep_dev->parent_dev->repr_cnt.nb_repr_started--;
+
        return 0;
 }
 
@@ -53,39 +301,189 @@ cnxk_rep_rx_queue_setup(struct rte_eth_dev *ethdev, uint16_t rx_queue_id, uint16_t nb_rx_desc,
                        unsigned int socket_id, const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mb_pool)
 {
-       PLT_SET_USED(ethdev);
-       PLT_SET_USED(rx_queue_id);
-       PLT_SET_USED(nb_rx_desc);
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       struct cnxk_rep_rxq *rxq = NULL;
+       uint16_t qid = 0;
+       int rc;
+
        PLT_SET_USED(socket_id);
-       PLT_SET_USED(rx_conf);
-       PLT_SET_USED(mb_pool);
+       /* If no representee assigned, store the respective rxq parameters */
+       if (!rep_dev->is_vf_active && !rep_dev->rxq) {
+               rxq = plt_zmalloc(sizeof(*rxq), RTE_CACHE_LINE_SIZE);
+               if (!rxq) {
+                       rc = -ENOMEM;
+                       plt_err("Failed to alloc RxQ for rep id %d", rep_dev->rep_id);
+                       goto fail;
+               }
+
+               rxq->qid = qid;
+               rxq->nb_desc = nb_rx_desc;
+               rxq->rep_dev = rep_dev;
+               rxq->mpool = mb_pool;
+               rxq->rx_conf = rx_conf;
+               rep_dev->rxq = rxq;
+               ethdev->data->rx_queues[rx_queue_id] = NULL;
+
+               return 0;
+       }
+
+       qid = rep_dev->rep_id;
+       rc = cnxk_eswitch_rxq_setup(rep_dev->parent_dev, qid, nb_rx_desc, rx_conf, mb_pool);
+       if (rc) {
+               plt_err("Failed to setup eswitch queue id %d", qid);
+               goto fail;
+       }
+
+       rxq = rep_dev->rxq;
+       if (!rxq) {
+               plt_err("Invalid RXQ handle for representor port %d rep id %d", rep_dev->port_id,
+                       rep_dev->rep_id);
+               rc = -EINVAL;
+               goto free_queue;
+       }
+
+       rxq->qid = qid;
+       ethdev->data->rx_queues[rx_queue_id] = rxq;
+       ethdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+       plt_rep_dbg("representor id %d portid %d rxq id %d", rep_dev->port_id,
+                   ethdev->data->port_id, rxq->qid);
+
        return 0;
+free_queue:
+       cnxk_eswitch_rxq_release(rep_dev->parent_dev, qid);
+fail:
+       return rc;
+}
+
+void
+cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id)
+{
+       struct cnxk_rep_rxq *rxq = ethdev->data->rx_queues[queue_id];
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       int rc;
+
+       if (!rxq)
+               return;
+
+       plt_rep_dbg("Stopping rxq %u", rxq->qid);
+
+       rc = cnxk_eswitch_rxq_stop(rep_dev->parent_dev, rxq->qid);
+       if (rc)
+               plt_err("Failed to stop rxq %d, rc=%d", rxq->qid, rc);
+
+       ethdev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 }
 
 void
 cnxk_rep_rx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
 {
-       PLT_SET_USED(ethdev);
-       PLT_SET_USED(queue_id);
+       struct cnxk_rep_rxq *rxq = ethdev->data->rx_queues[queue_id];
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       int rc;
+
+       if (!rxq) {
+               plt_err("Invalid rxq retrieved for rep_id %d", rep_dev->rep_id);
+               return;
+       }
+
+       plt_rep_dbg("Releasing rxq %u", rxq->qid);
+
+       rc = cnxk_eswitch_rxq_release(rep_dev->parent_dev, rxq->qid);
+       if (rc)
+               plt_err("Failed to release rxq %d, rc=%d", rxq->qid, rc);
 }
 
 int
 cnxk_rep_tx_queue_setup(struct rte_eth_dev *ethdev, uint16_t tx_queue_id, uint16_t nb_tx_desc,
                        unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
 {
-       PLT_SET_USED(ethdev);
-       PLT_SET_USED(tx_queue_id);
-       PLT_SET_USED(nb_tx_desc);
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       struct cnxk_rep_txq *txq = NULL;
+       int rc = 0, qid = 0;
+
        PLT_SET_USED(socket_id);
-       PLT_SET_USED(tx_conf);
+       /* If no representee assigned, store the respective txq parameters */
+       if (!rep_dev->is_vf_active && !rep_dev->txq) {
+               txq = plt_zmalloc(sizeof(*txq), RTE_CACHE_LINE_SIZE);
+               if (!txq) {
+                       rc = -ENOMEM;
+                       plt_err("Failed to alloc TxQ for rep id %d", rep_dev->rep_id);
+                       goto fail;
+               }
+
+               txq->qid = qid;
+               txq->nb_desc = nb_tx_desc;
+               txq->tx_conf = tx_conf;
+               txq->rep_dev = rep_dev;
+               rep_dev->txq = txq;
+
+               ethdev->data->tx_queues[tx_queue_id] = NULL;
+
+               return 0;
+       }
+
+       qid = rep_dev->rep_id;
+       rc = cnxk_eswitch_txq_setup(rep_dev->parent_dev, qid, nb_tx_desc, tx_conf);
+       if (rc) {
+               plt_err("Failed to setup eswitch queue id %d", qid);
+               goto fail;
+       }
+
+       txq = rep_dev->txq;
+       if (!txq) {
+               plt_err("Invalid TXQ handle for representor port %d rep id %d", rep_dev->port_id,
+                       rep_dev->rep_id);
+               rc = -EINVAL;
+               goto free_queue;
+       }
+
+       txq->qid = qid;
+       ethdev->data->tx_queues[tx_queue_id] = txq;
+       ethdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+       plt_rep_dbg("representor id %d portid %d txq id %d", rep_dev->port_id,
+                   ethdev->data->port_id, txq->qid);
+
        return 0;
+free_queue:
+       cnxk_eswitch_txq_release(rep_dev->parent_dev, qid);
+fail:
+       return rc;
+}
+
+void
+cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id)
+{
+       struct cnxk_rep_txq *txq = ethdev->data->tx_queues[queue_id];
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       int rc;
+
+       if (!txq)
+               return;
+
+       plt_rep_dbg("Stopping txq %u", txq->qid);
+
+       rc = cnxk_eswitch_txq_stop(rep_dev->parent_dev, txq->qid);
+       if (rc)
+               plt_err("Failed to stop txq %d, rc=%d", txq->qid, rc);
+
+       ethdev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 }
 
 void
 cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
 {
-       PLT_SET_USED(ethdev);
-       PLT_SET_USED(queue_id);
+       struct cnxk_rep_txq *txq = ethdev->data->tx_queues[queue_id];
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       int rc;
+
+       if (!txq) {
+               plt_err("Invalid txq retrieved for rep_id %d", rep_dev->rep_id);
+               return;
+       }
+
+       plt_rep_dbg("Releasing txq %u", txq->qid);
+
+       rc = cnxk_eswitch_txq_release(rep_dev->parent_dev, txq->qid);
+       if (rc)
+               plt_err("Failed to release txq %d, rc=%d", txq->qid, rc);
 }
 
 int
@@ -111,15 +509,70 @@ cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops)
        return 0;
 }
 
+int
+cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
+{
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
+       cnxk_rep_msg_eth_set_mac_meta_t msg_sm_meta;
+       cnxk_rep_msg_ack_data_t adata;
+       uint32_t len = 0;
+       int rc;
+       void *buffer;
+       size_t size;
+
+       /* If representor not representing any VF, return 0 */
+       if (!rep_dev->is_vf_active)
+               return 0;
+
+       size = CNXK_REP_MSG_MAX_BUFFER_SZ;
+       buffer = plt_zmalloc(size, 0);
+       if (!buffer) {
+               plt_err("Failed to allocate mem");
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       cnxk_rep_msg_populate_header(buffer, &len);
+
+       msg_sm_meta.portid = rep_dev->rep_id;
+       rte_memcpy(&msg_sm_meta.addr_bytes, addr->addr_bytes, RTE_ETHER_ADDR_LEN);
+       cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_sm_meta,
+                                          sizeof(cnxk_rep_msg_eth_set_mac_meta_t),
+                                          CNXK_REP_MSG_ETH_SET_MAC);
+       cnxk_rep_msg_populate_msg_end(buffer, &len);
+
+       rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, &adata);
+       if (rc) {
+               plt_err("Failed to process the message, err %d", rc);
+               goto fail;
+       }
+
+       if (adata.u.sval < 0) {
+               rc = adata.u.sval;
+               plt_err("Failed to set mac address, err %d", rc);
+               goto fail;
+       }
+
+       rte_free(buffer);
+
+       return 0;
+fail:
+       rte_free(buffer);
+       return rc;
+}
+
 /* CNXK platform representor dev ops */
 struct eth_dev_ops cnxk_rep_dev_ops = {
        .dev_infos_get = cnxk_rep_dev_info_get,
+       .representor_info_get = cnxk_rep_representor_info_get,
        .dev_configure = cnxk_rep_dev_configure,
        .dev_start = cnxk_rep_dev_start,
        .rx_queue_setup = cnxk_rep_rx_queue_setup,
        .rx_queue_release = cnxk_rep_rx_queue_release,
        .tx_queue_setup = cnxk_rep_tx_queue_setup,
        .tx_queue_release = cnxk_rep_tx_queue_release,
+       .promiscuous_enable = cnxk_rep_promiscuous_enable,
+       .promiscuous_disable = cnxk_rep_promiscuous_disable,
+       .mac_addr_set = cnxk_rep_mac_addr_set,
        .link_update = cnxk_rep_link_update,
        .dev_close = cnxk_rep_dev_close,
        .dev_stop = cnxk_rep_dev_stop,
-- 
2.18.0
