Implement ethernet device operation callbacks for port representor PMD

Add representor callbacks for device info get, configure, start/stop,
close, Rx/Tx queue setup/release, promiscuous mode, MAC address set and
stats get/reset. Representor Rx/Tx bursts are forwarded to the
per-representor queues of the rep_xport device, while MAC set and stats
requests are sent as CNXK_REP_MSG_ETH_* control messages.
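
Illustrative only, not part of this patch: a minimal sketch of how an
application might exercise the new callbacks on a representor port. The
port id, descriptor counts, mempool and MAC value are example
assumptions.

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Bring up one representor port and exercise the new callbacks. */
static int
rep_port_bringup(uint16_t rep_port, struct rte_mempool *mb_pool)
{
        struct rte_eth_conf conf = {0};
        struct rte_ether_addr mac = {{0x02, 0x00, 0x00, 0x00, 0x00, 0x01}};
        struct rte_eth_stats stats;
        int rc;

        /* One Rx/Tx queue pair per representor, as reported by dev_info_get */
        rc = rte_eth_dev_configure(rep_port, 1, 1, &conf);
        if (rc)
                return rc;
        rc = rte_eth_rx_queue_setup(rep_port, 0, 256, rte_eth_dev_socket_id(rep_port),
                                    NULL, mb_pool);
        if (rc)
                return rc;
        rc = rte_eth_tx_queue_setup(rep_port, 0, 512, rte_eth_dev_socket_id(rep_port), NULL);
        if (rc)
                return rc;
        rc = rte_eth_dev_start(rep_port);
        if (rc)
                return rc;

        /* Served by cnxk_rep_mac_addr_set() and cnxk_rep_stats_get() */
        rc = rte_eth_dev_default_mac_addr_set(rep_port, &mac);
        if (rc)
                return rc;
        return rte_eth_stats_get(rep_port, &stats);
}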

Signed-off-by: Harman Kalra <hka...@marvell.com>
---
 drivers/net/cnxk/cnxk_rep.c     |  62 +--
 drivers/net/cnxk/cnxk_rep.h     |  36 ++
 drivers/net/cnxk/cnxk_rep_msg.h |  15 +
 drivers/net/cnxk/cnxk_rep_ops.c | 655 ++++++++++++++++++++++++++++++--
 4 files changed, 713 insertions(+), 55 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c
index e6f5790adc..5ee7e93ab9 100644
--- a/drivers/net/cnxk/cnxk_rep.c
+++ b/drivers/net/cnxk/cnxk_rep.c
@@ -13,6 +13,9 @@ struct eth_dev_ops cnxk_rep_dev_ops = {
        .rx_queue_release = cnxk_rep_rx_queue_release,
        .tx_queue_setup = cnxk_rep_tx_queue_setup,
        .tx_queue_release = cnxk_rep_tx_queue_release,
+       .promiscuous_enable = cnxk_rep_promiscuous_enable,
+       .promiscuous_disable = cnxk_rep_promiscuous_disable,
+       .mac_addr_set = cnxk_rep_mac_addr_set,
        .link_update = cnxk_rep_link_update,
        .dev_close = cnxk_rep_dev_close,
        .dev_stop = cnxk_rep_dev_stop,
@@ -24,14 +27,36 @@ struct eth_dev_ops cnxk_rep_dev_ops = {
 int
 cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev)
 {
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+       const struct plt_memzone *mz;
+
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+       if (!mz) {
+               plt_err("Failed to lookup a memzone, rep id %d, err %d",
+                       rep_dev->vf_id, rte_errno);
+               goto fail;
+       }
+
+       rep_xport_vdev_cfg = mz->addr;
        plt_rep_dbg("Representor port:%d uninit", ethdev->data->port_id);
        rte_free(ethdev->data->mac_addrs);
        ethdev->data->mac_addrs = NULL;
 
+       rep_xport_vdev_cfg->nb_rep_ports--;
+       /* Once all representors are closed, cleanup rep base vdev config */
+       if (!rep_xport_vdev_cfg->nb_rep_ports) {
+               plt_free(rep_xport_vdev_cfg->q_bmap_mem);
+               plt_free(rep_xport_vdev_cfg->mdevinfo);
+               plt_memzone_free(mz);
+       }
+
        return 0;
+fail:
+       return rte_errno;
 }
 
 int
@@ -121,26 +146,6 @@ cnxk_init_rep_internal(struct cnxk_eth_dev *pf_dev)
        return rc;
 }
 
-static uint16_t
-cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
-       PLT_SET_USED(tx_queue);
-       PLT_SET_USED(tx_pkts);
-       PLT_SET_USED(nb_pkts);
-
-       return 0;
-}
-
-static uint16_t
-cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
-       PLT_SET_USED(rx_queue);
-       PLT_SET_USED(rx_pkts);
-       PLT_SET_USED(nb_pkts);
-
-       return 0;
-}
-
 static int
 cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
 {
@@ -152,6 +157,11 @@ cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
        rep_dev->vf_id = rep_params->vf_id;
        rep_dev->switch_domain_id = rep_params->switch_domain_id;
        rep_dev->parent_dev = rep_params->parent_dev;
+       rep_dev->u.rxq = UINT16_MAX;
+       rep_dev->u.txq = UINT16_MAX;
+
+       pf_dev = cnxk_eth_pmd_priv(rep_dev->parent_dev);
+       rep_dev->rep_xport_vdev = pf_dev->rep_xport_vdev;
 
        eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
        eth_dev->data->representor_id = rep_params->vf_id;
@@ -170,11 +180,10 @@ cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
        eth_dev->dev_ops = &cnxk_rep_dev_ops;
 
        /* Rx/Tx functions stubs to avoid crashing */
-       eth_dev->rx_pkt_burst = cnxk_rep_rx_burst;
-       eth_dev->tx_pkt_burst = cnxk_rep_tx_burst;
+       eth_dev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;
+       eth_dev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;
 
        /* Link state. Inherited from PF */
-       pf_dev = cnxk_eth_pmd_priv(rep_dev->parent_dev);
        link = &pf_dev->eth_dev->data->dev_link;
 
        eth_dev->data->dev_link.link_speed = link->link_speed;
@@ -325,13 +334,6 @@ cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct rte_eth_dev *pf_ethdev
                goto err;
        }
 
-       /* Launch a thread to handle control messages */
-       rc = cnxk_rep_control_thread_launch(pf_dev);
-       if (rc) {
-               plt_err("Failed to launch message ctrl thread");
-               goto err;
-       }
-
        return 0;
 err:
        return rc;
diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h
index 8825fa1cf2..2b6403f003 100644
--- a/drivers/net/cnxk/cnxk_rep.h
+++ b/drivers/net/cnxk/cnxk_rep.h
@@ -6,6 +6,7 @@
 #ifndef __CNXK_REP_H__
 #define __CNXK_REP_H__
 
+#define CNXK_REP_XPORT_VDEV_CFG_MZ  "rep_xport_vdev_cfg"
 #define CNXK_REP_XPORT_VDEV_DEVARGS "role=server"
 #define CNXK_REP_XPORT_VDEV_NAME          "net_memif"
 #define CNXK_REP_VDEV_CTRL_QUEUE   0
@@ -14,6 +15,18 @@
 /* Common ethdev ops */
 extern struct eth_dev_ops cnxk_rep_dev_ops;
 
+/* Representor base device configurations */
+typedef struct rep_xport_vdev_cfg_s {
+       struct plt_bitmap *q_map;
+       void *q_bmap_mem;
+       uint8_t nb_rep_ports;
+       uint8_t nb_rep_started;
+       struct rte_mempool *ctrl_chan_pool;
+       struct rte_eth_dev_info *mdevinfo;
+       bool rep_xport_configured;
+} rep_xport_vdev_cfg_t;
+
+/* Representor port configurations */
 struct cnxk_rep_dev {
        uint16_t vf_id;
        uint16_t switch_domain_id;
@@ -22,15 +35,33 @@ struct cnxk_rep_dev {
        uint16_t rep_xport_vdev;
        bool is_vf_active;
        uint16_t pf_func;
+       union {
+               uint16_t rxq;
+               uint16_t txq;
+               uint16_t rep_portid;
+       } u;
        uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
 };
 
+/* Inline functions */
 static inline struct cnxk_rep_dev *
 cnxk_rep_pmd_priv(const struct rte_eth_dev *eth_dev)
 {
        return eth_dev->data->dev_private;
 }
 
+static inline struct rte_eth_dev *
+cnxk_rep_xport_eth_dev(uint16_t portid)
+{
+       if (!rte_eth_dev_is_valid_port(portid)) {
+               plt_err("Invalid port_id=%u", portid);
+               return NULL;
+       }
+
+       return &rte_eth_devices[portid];
+}
+
+/* Prototypes */
 int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct rte_eth_dev *pf_ethdev,
                       struct rte_eth_devargs *eth_da);
 int cnxk_rep_dev_remove(struct rte_eth_dev *pf_ethdev);
@@ -52,5 +83,10 @@ int cnxk_rep_dev_close(struct rte_eth_dev *eth_dev);
 int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats);
 int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev);
 int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops);
+int cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev);
+int cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev);
+int cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr);
+uint16_t cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+uint16_t cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
 #endif /* __CNXK_REP_H__ */
diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h
index a28c63f762..554122d7f8 100644
--- a/drivers/net/cnxk/cnxk_rep_msg.h
+++ b/drivers/net/cnxk/cnxk_rep_msg.h
@@ -19,6 +19,10 @@ typedef enum CNXK_REP_MSG {
        CNXK_REP_MSG_READY = 0,
        CNXK_REP_MSG_ACK,
        CNXK_REP_MSG_EXIT,
+       /* Ethernet operation msgs */
+       CNXK_REP_MSG_ETH_SET_MAC,
+       CNXK_REP_MSG_ETH_STATS_GET,
+       CNXK_REP_MSG_ETH_STATS_CLEAR,
        /* End of messaging sequence */
        CNXK_REP_MSG_END,
 } cnxk_rep_msg_t;
@@ -64,6 +68,17 @@ typedef struct cnxk_rep_msg_exit_data {
        uint8_t val;
 } __rte_packed cnxk_rep_msg_exit_data_t;
 
+/* Ethernet op - set mac */
+typedef struct cnxk_rep_msg_eth_mac_set_meta {
+       uint16_t portid;
+       uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
+} __rte_packed cnxk_rep_msg_eth_set_mac_meta_t;
+
+/* Ethernet op - get/clear stats */
+typedef struct cnxk_rep_msg_eth_stats_meta {
+       uint16_t portid;
+} __rte_packed cnxk_rep_msg_eth_stats_meta_t;
+
 void cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, cnxk_rep_msg_t type,
                                    uint32_t size);
 void cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void *msg_meta, uint32_t sz,
diff --git a/drivers/net/cnxk/cnxk_rep_ops.c b/drivers/net/cnxk/cnxk_rep_ops.c
index 3f1aab077b..022a5137df 100644
--- a/drivers/net/cnxk/cnxk_rep_ops.c
+++ b/drivers/net/cnxk/cnxk_rep_ops.c
@@ -3,6 +3,54 @@
  */
 
 #include <cnxk_rep.h>
+#include <cnxk_rep_msg.h>
+
+#define MEMPOOL_CACHE_SIZE 256
+#define TX_DESC_PER_QUEUE  512
+#define RX_DESC_PER_QUEUE  256
+#define NB_REP_VDEV_MBUF   1024
+
+static uint16_t
+cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       struct cnxk_rep_dev *rep_dev = tx_queue;
+
+       nb_pkts = rte_eth_tx_burst(rep_dev->rep_xport_vdev, rep_dev->u.txq, tx_pkts, nb_pkts);
+
+       return nb_pkts;
+}
+
+static uint16_t
+cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       struct cnxk_rep_dev *rep_dev = rx_queue;
+
+       nb_pkts = rte_eth_rx_burst(rep_dev->rep_xport_vdev, rep_dev->u.rxq, rx_pkts, nb_pkts);
+       if (nb_pkts == 0)
+               return 0;
+
+       return nb_pkts;
+}
+
+uint16_t
+cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       PLT_SET_USED(tx_queue);
+       PLT_SET_USED(tx_pkts);
+       PLT_SET_USED(nb_pkts);
+
+       return 0;
+}
+
+uint16_t
+cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       PLT_SET_USED(rx_queue);
+       PLT_SET_USED(rx_pkts);
+       PLT_SET_USED(nb_pkts);
+
+       return 0;
+}
 
 int
 cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
@@ -13,39 +61,379 @@ cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 }
 
 int
-cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *devinfo)
+cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *dev_info)
 {
-       PLT_SET_USED(ethdev);
-       PLT_SET_USED(devinfo);
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+       struct rte_eth_dev_info mdevinfo;
+       const struct plt_memzone *mz;
+       int rc = 0;
+
+       mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+       if (!mz) {
+               mz = plt_memzone_reserve_cache_align(CNXK_REP_XPORT_VDEV_CFG_MZ,
+                                                    sizeof(rep_xport_vdev_cfg_t));
+               if (!mz) {
+                       plt_err("Failed to reserve a memzone, rep id %d, err %d",
+                               rep_dev->vf_id, rte_errno);
+                       goto fail;
+               }
+       }
+
+       rep_xport_vdev_cfg = mz->addr;
+       /* Get the rep base vdev devinfo */
+       if (!rep_xport_vdev_cfg->mdevinfo) {
+               rc = rte_eth_dev_info_get(rep_dev->rep_xport_vdev, &mdevinfo);
+               if (rc) {
+                       plt_err("Failed to get rep_xport port dev info, err %d", rc);
+                       goto fail;
+               }
+               rep_xport_vdev_cfg->mdevinfo = plt_zmalloc(sizeof(struct rte_eth_dev_info), 0);
+               if (!rep_xport_vdev_cfg->mdevinfo) {
+                       plt_err("Failed to alloc memory for dev info");
+                       goto fail;
+               }
+               rte_memcpy(rep_xport_vdev_cfg->mdevinfo, &mdevinfo,
+                          sizeof(struct rte_eth_dev_info));
+       }
+
+       /* Use rep_xport device info */
+       dev_info->max_mac_addrs = rep_xport_vdev_cfg->mdevinfo->max_mac_addrs;
+       dev_info->max_rx_pktlen = rep_xport_vdev_cfg->mdevinfo->max_rx_pktlen;
+       dev_info->min_rx_bufsize = rep_xport_vdev_cfg->mdevinfo->min_rx_bufsize;
+       dev_info->tx_offload_capa = rep_xport_vdev_cfg->mdevinfo->tx_offload_capa;
+
+       /* For the sake of symmetry, max_rx_queues = max_tx_queues */
+       dev_info->max_rx_queues = 1;
+       dev_info->max_tx_queues = 1;
+
+       /* MTU specifics */
+       dev_info->max_mtu = rep_xport_vdev_cfg->mdevinfo->max_mtu;
+       dev_info->min_mtu = rep_xport_vdev_cfg->mdevinfo->min_mtu;
+
+       /* Switch info specific */
+       dev_info->switch_info.name = ethdev->device->name;
+       dev_info->switch_info.domain_id = rep_dev->switch_domain_id;
+       dev_info->switch_info.port_id = rep_dev->vf_id;
+
        return 0;
+fail:
+       return rc;
+}
+
+static inline int
+bitmap_ctzll(uint64_t slab)
+{
+       if (slab == 0)
+               return 0;
+
+       return __builtin_ctzll(slab);
+}
+
+static uint16_t
+alloc_rep_xport_qid(struct plt_bitmap *bmp)
+{
+       uint16_t idx, rc;
+       uint64_t slab;
+       uint32_t pos;
+
+       pos = 0;
+       slab = 0;
+       /* Scan from the beginning */
+       plt_bitmap_scan_init(bmp);
+       /* Scan bitmap to get the free pool */
+       rc = plt_bitmap_scan(bmp, &pos, &slab);
+       /* Empty bitmap */
+       if (rc == 0)
+               return UINT16_MAX;
+
+       idx = pos + bitmap_ctzll(slab);
+       plt_bitmap_clear(bmp, idx);
+       return idx;
+}
+
+static int
+configure_rep_xport_queues_map(rep_xport_vdev_cfg_t *rep_xport_vdev_cfg)
+{
+       int id, rc = 0, q_max;
+       uint32_t bmap_sz;
+       void *bmap_mem;
+
+       q_max = CNXK_MAX_REP_PORTS + 1;
+       /* Return success on no-pci case */
+       if (!q_max)
+               return 0;
+
+       bmap_sz = plt_bitmap_get_memory_footprint(q_max);
+
+       /* Allocate memory for rep_xport queue bitmap */
+       bmap_mem = plt_zmalloc(bmap_sz, RTE_CACHE_LINE_SIZE);
+       if (bmap_mem == NULL) {
+               plt_err("Failed to allocate memory for rep_xport queue bitmap");
+               rc = -ENOMEM;
+               goto exit;
+       }
+       rep_xport_vdev_cfg->q_bmap_mem = bmap_mem;
+
+       /* Initialize rep_xport queue bitmap */
+       rep_xport_vdev_cfg->q_map = plt_bitmap_init(q_max, bmap_mem, bmap_sz);
+       if (!rep_xport_vdev_cfg->q_map) {
+               plt_err("Failed to initialize rep_xport queue bitmap");
+               rc = -EIO;
+               goto exit;
+       }
+
+       /* Mark all queues as free (set) initially */
+       for (id = 0; id < q_max; id++)
+               plt_bitmap_set(rep_xport_vdev_cfg->q_map, id);
+
+       return 0;
+exit:
+       return rc;
+}
+
+static uint16_t
+cnxk_rep_eth_dev_count_total(void)
+{
+       uint16_t port, count = 0;
+       struct rte_eth_dev *ethdev;
+
+       RTE_ETH_FOREACH_DEV(port) {
+               ethdev = &rte_eth_devices[port];
+               if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+                       count++;
+       }
+
+       return count;
+}
+
+static int
+configure_control_channel(rep_xport_vdev_cfg_t *rep_xport_vdev_cfg, uint16_t portid)
+{
+       struct rte_mempool *ctrl_chan_pool = NULL;
+       int rc;
+
+       /* Reserve a qid for the control channel; the first allocation returns CNXK_REP_VDEV_CTRL_QUEUE */
+       alloc_rep_xport_qid(rep_xport_vdev_cfg->q_map);
+
+       /* Create the mbuf pool. */
+       ctrl_chan_pool = rte_pktmbuf_pool_create("rep_xport_ctrl_pool", NB_REP_VDEV_MBUF,
+                                               MEMPOOL_CACHE_SIZE, RTE_CACHE_LINE_SIZE,
+                                               RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+
+       if (ctrl_chan_pool == NULL) {
+               plt_err("Cannot init mbuf pool");
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       /* Setup a RX queue for control channel */
+       rc = rte_eth_rx_queue_setup(portid, CNXK_REP_VDEV_CTRL_QUEUE, RX_DESC_PER_QUEUE,
+                                   rte_eth_dev_socket_id(portid), NULL, ctrl_chan_pool);
+       if (rc < 0) {
+               plt_err("rte_eth_rx_queue_setup:err=%d, port=%u\n", rc, portid);
+               goto fail;
+       }
+
+       /* Setup a TX queue for control channel */
+       rc = rte_eth_tx_queue_setup(portid, CNXK_REP_VDEV_CTRL_QUEUE, TX_DESC_PER_QUEUE,
+                                   rte_eth_dev_socket_id(portid), NULL);
+       if (rc < 0) {
+               plt_err("TX queue setup failed, err %d port %d", rc, portid);
+               goto fail;
+       }
+
+       rep_xport_vdev_cfg->ctrl_chan_pool = ctrl_chan_pool;
+
+       return 0;
+fail:
+       return rc;
+}
+
+static int
+configure_rep_xport_dev(rep_xport_vdev_cfg_t *rep_xport_vdev_cfg, uint16_t portid)
+{
+       struct rte_eth_dev *rep_xport_ethdev = cnxk_rep_xport_eth_dev(portid);
+       static struct rte_eth_conf port_conf_default;
+       uint16_t nb_rxq, nb_txq, nb_rep_ports;
+       int rc = 0;
+
+       /* If rep_xport port already started, stop it and reconfigure */
+       if (rep_xport_ethdev->data->dev_started)
+               rte_eth_dev_stop(portid);
+
+       /* Get the number of representors probed */
+       nb_rep_ports = cnxk_rep_eth_dev_count_total();
+       if (nb_rep_ports > CNXK_MAX_REP_PORTS) {
+               plt_err("Representors probed %d > Max supported %d", nb_rep_ports,
+                       CNXK_MAX_REP_PORTS);
+               rc = -EINVAL;
+               goto fail;
+       }
+
+       /* Each rep_xport queue represents one representor port. One additional
+        * queue is configured as a control channel to configure flows, etc.
+        */
+       nb_rxq = CNXK_MAX_REP_PORTS + 1;
+       nb_txq = CNXK_MAX_REP_PORTS + 1;
+
+       rc = rte_eth_dev_configure(portid, nb_rxq, nb_txq, &port_conf_default);
+       if (rc) {
+               plt_err("Failed to configure rep_xport port: %d", rc);
+               goto fail;
+       }
+
+       rep_xport_vdev_cfg->rep_xport_configured = true;
+       rep_xport_vdev_cfg->nb_rep_ports = nb_rep_ports;
+
+       return 0;
+fail:
+       return rc;
 }
 
 int
 cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
 {
-       PLT_SET_USED(ethdev);
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+       const struct plt_memzone *mz;
+       int rc = -1;
+
+       mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+       if (!mz) {
+               mz = plt_memzone_reserve_cache_align(CNXK_REP_XPORT_VDEV_CFG_MZ,
+                                                    sizeof(rep_xport_vdev_cfg_t));
+               if (!mz) {
+                       plt_err("Failed to reserve a memzone, rep id %d, err %d",
+                               rep_dev->vf_id, rte_errno);
+                       goto fail;
+               }
+       }
+
+       rep_xport_vdev_cfg = mz->addr;
+       /* Return if rep_xport dev already configured */
+       if (rep_xport_vdev_cfg->rep_xport_configured) {
+               rep_dev->ctrl_chan_pool = rep_xport_vdev_cfg->ctrl_chan_pool;
+               return 0;
+       }
+
+       /* Configure rep_xport pmd */
+       rc = configure_rep_xport_dev(rep_xport_vdev_cfg, rep_dev->rep_xport_vdev);
+       if (rc) {
+               plt_err("Configuring rep_xport port failed");
+               goto free;
+       }
+
+       /* Setup a bitmap for rep_xport queues */
+       rc = configure_rep_xport_queues_map(rep_xport_vdev_cfg);
+       if (rc != 0) {
+               plt_err("Failed to setup rep_xport queue map, err %d", rc);
+               goto free;
+       }
+
+       /* Setup a queue for control channel */
+       rc = configure_control_channel(rep_xport_vdev_cfg, rep_dev->rep_xport_vdev);
+       if (rc != 0) {
+               plt_err("Failed to setup control channel, err %d", rc);
+               goto free;
+       }
+       rep_dev->ctrl_chan_pool = rep_xport_vdev_cfg->ctrl_chan_pool;
+
        return 0;
+free:
+       plt_memzone_free(mz);
+fail:
+       return rc;
 }
 
 int
-cnxk_rep_dev_start(struct rte_eth_dev *ethdev)
+cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev)
 {
        PLT_SET_USED(ethdev);
        return 0;
 }
 
 int
-cnxk_rep_dev_close(struct rte_eth_dev *ethdev)
+cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev)
 {
        PLT_SET_USED(ethdev);
        return 0;
 }
 
+int
+cnxk_rep_dev_start(struct rte_eth_dev *ethdev)
+{
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+       const struct plt_memzone *mz;
+       int rc = 0;
+
+       mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+       if (!mz) {
+               plt_err("Failed to lookup a memzone, rep id %d, err %d",
+                       rep_dev->vf_id, rte_errno);
+               goto fail;
+       }
+
+       rep_xport_vdev_cfg = mz->addr;
+       ethdev->rx_pkt_burst = cnxk_rep_rx_burst;
+       ethdev->tx_pkt_burst = cnxk_rep_tx_burst;
+
+       /* Start rep_xport device only once after first representor gets active */
+       if (!rep_xport_vdev_cfg->nb_rep_started) {
+               rc = rte_eth_dev_start(rep_dev->rep_xport_vdev);
+               if (rc) {
+                       plt_err("Rep base vdev portid %d start failed, err %d",
+                               rep_dev->rep_xport_vdev, rc);
+                       goto fail;
+               }
+
+               /* Launch a thread to handle control messages */
+               rc = cnxk_rep_control_thread_launch(cnxk_eth_pmd_priv(rep_dev->parent_dev));
+               if (rc) {
+                       plt_err("Failed to launch message ctrl thread");
+                       goto fail;
+               }
+       }
+
+       rep_xport_vdev_cfg->nb_rep_started++;
+
+       return 0;
+fail:
+       return rc;
+}
+
+int
+cnxk_rep_dev_close(struct rte_eth_dev *ethdev)
+{
+       return cnxk_rep_dev_uninit(ethdev);
+}
+
 int
 cnxk_rep_dev_stop(struct rte_eth_dev *ethdev)
 {
-       PLT_SET_USED(ethdev);
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+       const struct plt_memzone *mz;
+
+       mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+       if (!mz) {
+               plt_err("Failed to lookup a memzone, rep id %d, err %d",
+                       rep_dev->vf_id, rte_errno);
+               goto fail;
+       }
+
+       rep_xport_vdev_cfg = mz->addr;
+       ethdev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;
+       ethdev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;
+       rep_xport_vdev_cfg->nb_rep_started--;
+
+       /* Stop rep_xport device only after all other devices stopped */
+       if (!rep_xport_vdev_cfg->nb_rep_started)
+               rte_eth_dev_stop(rep_dev->rep_xport_vdev);
+
        return 0;
+fail:
+       return rte_errno;
 }
 
 int
@@ -53,54 +441,220 @@ cnxk_rep_rx_queue_setup(struct rte_eth_dev *ethdev, uint16_t rx_queue_id, uint16
                        unsigned int socket_id, const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mb_pool)
 {
-       PLT_SET_USED(ethdev);
-       PLT_SET_USED(rx_queue_id);
-       PLT_SET_USED(nb_rx_desc);
-       PLT_SET_USED(socket_id);
-       PLT_SET_USED(rx_conf);
-       PLT_SET_USED(mb_pool);
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+       const struct plt_memzone *mz;
+       int rc = 0;
+
+       mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+       if (!mz) {
+               plt_err("Failed to lookup a memzone, rep id %d, err %d",
+                       rep_dev->vf_id, rte_errno);
+               goto fail;
+       }
+
+       rep_xport_vdev_cfg = mz->addr;
+       /* Allocate a qid, if tx queue setup already done use the same qid */
+       if (rep_dev->u.rxq == UINT16_MAX && rep_dev->u.txq == UINT16_MAX)
+               rep_dev->u.rxq = alloc_rep_xport_qid(rep_xport_vdev_cfg->q_map);
+       else
+               rep_dev->u.rxq = rep_dev->u.txq;
+
+       /* Setup the RX queue */
+       rc = rte_eth_rx_queue_setup(rep_dev->rep_xport_vdev, rep_dev->u.rxq, nb_rx_desc, socket_id,
+                                   rx_conf, mb_pool);
+       if (rc < 0) {
+               plt_err("rte_eth_rx_queue_setup:err=%d, port=%u\n", rc, rep_dev->rep_xport_vdev);
+               goto fail;
+       }
+
+       ethdev->data->rx_queues[rx_queue_id] = rep_dev;
+       plt_info("Representor id %d portid %d rxq %d", rep_dev->vf_id, ethdev->data->port_id,
+                rep_dev->u.rxq);
+
        return 0;
+fail:
+       return rc;
 }
 
 void
 cnxk_rep_rx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
 {
-       PLT_SET_USED(ethdev);
-       PLT_SET_USED(queue_id);
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+       const struct plt_memzone *mz;
+       PLT_SET_USED(queue_id);
+
+       mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+       if (!mz) {
+               plt_err("Failed to lookup a memzone, rep id %d, err %d",
+                       rep_dev->vf_id, rte_errno);
+               return;
+       }
+
+       rep_xport_vdev_cfg = mz->addr;
+       plt_bitmap_set(rep_xport_vdev_cfg->q_map, rep_dev->u.rxq);
 }
 
 int
 cnxk_rep_tx_queue_setup(struct rte_eth_dev *ethdev, uint16_t tx_queue_id, uint16_t nb_tx_desc,
                        unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
 {
-       PLT_SET_USED(ethdev);
-       PLT_SET_USED(tx_queue_id);
-       PLT_SET_USED(nb_tx_desc);
-       PLT_SET_USED(socket_id);
-       PLT_SET_USED(tx_conf);
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+       const struct plt_memzone *mz;
+       int rc = 0;
+
+       mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+       if (!mz) {
+               plt_err("Failed to lookup a memzone, rep id %d, err %d",
+                       rep_dev->vf_id, rte_errno);
+               goto fail;
+       }
+
+       rep_xport_vdev_cfg = mz->addr;
+       /* Allocate a qid, if rx queue setup already done use the same qid */
+       if (rep_dev->u.rxq == UINT16_MAX && rep_dev->u.txq == UINT16_MAX)
+               rep_dev->u.txq = alloc_rep_xport_qid(rep_xport_vdev_cfg->q_map);
+       else
+               rep_dev->u.txq = rep_dev->u.rxq;
+
+       /* Setup the TX queue */
+       rc = rte_eth_tx_queue_setup(rep_dev->rep_xport_vdev, rep_dev->u.txq, nb_tx_desc, socket_id,
+                                   tx_conf);
+       if (rc < 0) {
+               plt_err("TX queue setup failed, err %d port %d", rc, rep_dev->rep_xport_vdev);
+               goto fail;
+       }
+
+       ethdev->data->tx_queues[tx_queue_id] = rep_dev;
+       plt_info("Representor id %d portid %d txq %d", rep_dev->vf_id, ethdev->data->port_id,
+                rep_dev->u.txq);
+
        return 0;
+fail:
+       return rc;
 }
 
 void
 cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
 {
-       PLT_SET_USED(ethdev);
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       rep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;
+       const struct plt_memzone *mz;
        PLT_SET_USED(queue_id);
+
+       mz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);
+       if (!mz) {
+               plt_err("Failed to lookup a memzone, rep id %d, err %d",
+                       rep_dev->vf_id, rte_errno);
+               return;
+       }
+
+       rep_xport_vdev_cfg = mz->addr;
+       plt_bitmap_set(rep_xport_vdev_cfg->q_map, rep_dev->u.txq);
+}
+
+static int
+process_eth_stats(struct cnxk_rep_dev *rep_dev, cnxk_rep_msg_ack_data_t *adata, cnxk_rep_msg_t msg)
+{
+       cnxk_rep_msg_eth_stats_meta_t msg_st_meta;
+       uint32_t len = 0, rc;
+       void *buffer;
+       size_t size;
+
+       size = CNXK_REP_MSG_MAX_BUFFER_SZ;
+       buffer = plt_zmalloc(size, 0);
+       if (!buffer) {
+               plt_err("Failed to allocate mem");
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       cnxk_rep_msg_populate_header(buffer, &len);
+
+       msg_st_meta.portid = rep_dev->u.rxq;
+       cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_st_meta,
+                                          sizeof(cnxk_rep_msg_eth_stats_meta_t), msg);
+       cnxk_rep_msg_populate_msg_end(buffer, &len);
+
+       rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
+       if (rc) {
+               plt_err("Failed to process the message, err %d", rc);
+               goto fail;
+       }
+
+       rte_free(buffer);
+
+       return 0;
+fail:
+       rte_free(buffer);
+       return rc;
 }
 
 int
 cnxk_rep_stats_get(struct rte_eth_dev *ethdev, struct rte_eth_stats *stats)
 {
-       PLT_SET_USED(ethdev);
-       PLT_SET_USED(stats);
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       struct rte_eth_stats vf_stats;
+       cnxk_rep_msg_ack_data_t adata;
+       int rc;
+
+       /* If representor not representing any active VF, return 0 */
+       if (!rep_dev->is_vf_active)
+               return 0;
+
+       rc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_GET);
+       if (rc || adata.u.sval < 0) {
+               if (adata.u.sval < 0)
+                       rc = adata.u.sval;
+
+               plt_err("Failed to get stats for vf rep %x, err %d", rep_dev->vf_id, rc);
+               goto fail;
+       }
+
+       if (adata.size != sizeof(struct rte_eth_stats)) {
+               rc = -EINVAL;
+               plt_err("Incomplete stats received for vf rep %d", rep_dev->vf_id);
+               goto fail;
+       }
+
+       rte_memcpy(&vf_stats, adata.u.data, adata.size);
+
+       stats->q_ipackets[0] = vf_stats.ipackets;
+       stats->q_ibytes[0] = vf_stats.ibytes;
+       stats->ipackets = vf_stats.ipackets;
+       stats->ibytes = vf_stats.ibytes;
+
+       stats->q_opackets[0] = vf_stats.opackets;
+       stats->q_obytes[0] = vf_stats.obytes;
+       stats->opackets = vf_stats.opackets;
+       stats->obytes = vf_stats.obytes;
+
        return 0;
+fail:
+       return rc;
 }
 
 int
 cnxk_rep_stats_reset(struct rte_eth_dev *ethdev)
 {
-       PLT_SET_USED(ethdev);
-       return 0;
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+       cnxk_rep_msg_ack_data_t adata;
+       int rc = 0;
+
+       /* If representor not representing any active VF, return 0 */
+       if (!rep_dev->is_vf_active)
+               return 0;
+
+       rc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_CLEAR);
+       if (rc || adata.u.sval < 0) {
+               if (adata.u.sval < 0)
+                       rc = adata.u.sval;
+
+               plt_err("Failed to clear stats for vf rep %x, err %d", rep_dev->vf_id, rc);
+       }
+
+       return rc;
 }
 
 int
@@ -110,3 +664,54 @@ cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **op
        PLT_SET_USED(ops);
        return 0;
 }
+
+int
+cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
+{
+       struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
+       cnxk_rep_msg_eth_set_mac_meta_t msg_sm_meta;
+       cnxk_rep_msg_ack_data_t adata;
+       uint32_t len = 0, rc;
+       void *buffer;
+       size_t size;
+
+       /* If representor not representing any VF, return 0 */
+       if (!rep_dev->is_vf_active)
+               return 0;
+
+       size = CNXK_REP_MSG_MAX_BUFFER_SZ;
+       buffer = plt_zmalloc(size, 0);
+       if (!buffer) {
+               plt_err("Failed to allocate mem");
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       cnxk_rep_msg_populate_header(buffer, &len);
+
+       msg_sm_meta.portid = rep_dev->u.rxq;
+       rte_memcpy(&msg_sm_meta.addr_bytes, addr->addr_bytes, RTE_ETHER_ADDR_LEN);
+       cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_sm_meta,
+                                          sizeof(cnxk_rep_msg_eth_set_mac_meta_t),
+                                          CNXK_REP_MSG_ETH_SET_MAC);
+       cnxk_rep_msg_populate_msg_end(buffer, &len);
+
+       rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, &adata);
+       if (rc) {
+               plt_err("Failed to process the message, err %d", rc);
+               goto fail;
+       }
+
+       if (adata.u.sval < 0) {
+               rc = adata.u.sval;
+               plt_err("Failed to set mac address, err %d", rc);
+               goto fail;
+       }
+
+       rte_free(buffer);
+
+       return 0;
+fail:
+       rte_free(buffer);
+       return rc;
+}
-- 
2.18.0
