1. Process virtchnl messages from Front End.
2. Ethdev ops implementation for queue setup.
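
Virtchnl messages handled include version and resource negotiation, VSI
queue configuration, queue enable/disable, IRQ mapping, promiscuous
mode, stats query, and VLAN stripping; MAC and VLAN filtering are left
as TODOs.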

Signed-off-by: Jingjing Wu <jingjing...@intel.com>
Signed-off-by: Xiuchun Lu <xiuchun...@intel.com>
---
 drivers/net/iavf_be/iavf_be.h        |  32 ++
 drivers/net/iavf_be/iavf_be_ethdev.c | 339 ++++++++++-
 drivers/net/iavf_be/iavf_be_rxtx.c   | 164 ++++++
 drivers/net/iavf_be/iavf_be_rxtx.h   | 105 ++++
 drivers/net/iavf_be/iavf_be_vchnl.c  | 826 ++++++++++++++++++++++++++-
 drivers/net/iavf_be/meson.build      |   1 +
 6 files changed, 1452 insertions(+), 15 deletions(-)
 create mode 100644 drivers/net/iavf_be/iavf_be_rxtx.c
 create mode 100644 drivers/net/iavf_be/iavf_be_rxtx.h

diff --git a/drivers/net/iavf_be/iavf_be.h b/drivers/net/iavf_be/iavf_be.h
index c182d9558a..1ca316e3e9 100644
--- a/drivers/net/iavf_be/iavf_be.h
+++ b/drivers/net/iavf_be/iavf_be.h
@@ -8,6 +8,19 @@
 #define IAVF_BE_AQ_BUF_SZ            4096
 #define IAVF_BE_32_TO_64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo))
 
+/* Default number of VSIs that a VF can contain */
+#define IAVF_BE_DEFAULT_VSI_NUM     1
+#define AVF_DEFAULT_MAX_MTU         1500
+
+/* Set the MAX queues to 16 and MAX vectors to 17
+ * as base mode virtchnl supports at most 16 queue pair mappings.
+ */
+#define IAVF_BE_MAX_NUM_QUEUES      16
+#define IAVF_BE_MAX_VECTORS         17
+#define IAVF_BE_BUF_SIZE_MIN        1024
+#define IAVF_BE_FRAME_SIZE_MAX      9728
+#define IAVF_BE_NUM_MACADDR_MAX     64
+
 #define IAVFBE_READ_32(addr)        \
        rte_le_to_cpu_32(*(volatile uint32_t *)(addr))
 #define IAVFBE_WRITE_32(addr, val)  \
@@ -47,8 +60,15 @@ struct iavfbe_adapter {
        /* Adminq handle thread info */
        volatile int thread_status;
        pthread_t thread_id;
+
+       struct virtchnl_version_info virtchnl_version;
+       struct virtchnl_vf_resource *vf_res; /* Resource to VF */
+       /* Pointer to array of queue pairs info. */
+       struct virtchnl_queue_pair_info *qps;
        uint16_t nb_qps;
+       uint16_t nb_used_qps;
        bool link_up;
+       struct virtchnl_eth_stats eth_stats; /* Stats to VF */
        int cq_irqfd;
        rte_atomic32_t irq_enable;
 
@@ -65,11 +85,23 @@ struct iavfbe_adapter {
 #define IAVFBE_DEV_PRIVATE_TO_ADAPTER(adapter) \
        ((struct iavfbe_adapter *)adapter)
 
+void iavfbe_reset_all_queues(struct iavfbe_adapter *adapter);
 int iavfbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+int iavfbe_lock_lanq(struct iavfbe_adapter *adapter);
+int iavfbe_unlock_lanq(struct iavfbe_adapter *adapter);
+void iavfbe_notify_vf_reset(struct iavfbe_adapter *adapter);
 void iavfbe_handle_virtchnl_msg(void *arg);
 void iavfbe_reset_asq(struct iavfbe_adapter *adapter, bool lock);
 void iavfbe_reset_arq(struct iavfbe_adapter *adapter, bool lock);
 
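+/*
+ * Return the difference between a running counter and its snapshot,
+ * accounting for uint64 wraparound.
+ */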
+static inline uint64_t stats_update(uint64_t offset, uint64_t stat)
+{
+       if (stat >= offset)
+               return (stat - offset);
+       else
+               return (uint64_t)(((uint64_t)-1) - offset + stat + 1);
+}
+
 extern int iavfbe_logtype;
 #define IAVF_BE_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, iavfbe_logtype, "%s(): " fmt "\n", \
diff --git a/drivers/net/iavf_be/iavf_be_ethdev.c b/drivers/net/iavf_be/iavf_be_ethdev.c
index 2ab66f889d..940ed66ce4 100644
--- a/drivers/net/iavf_be/iavf_be_ethdev.c
+++ b/drivers/net/iavf_be/iavf_be_ethdev.c
@@ -16,6 +16,7 @@
 #include <iavf_type.h>
 #include <virtchnl.h>
 #include "iavf_be.h"
+#include "iavf_be_rxtx.h"
 
 #define AVFBE_EDEV_ID_ARG "emu"
 #define AVFBE_MAC_ARG "mac"
@@ -46,6 +47,8 @@ static int iavfbe_dev_start(struct rte_eth_dev *dev);
 static int iavfbe_dev_stop(struct rte_eth_dev *dev);
 static int iavfbe_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
+static int iavfbe_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static int iavfbe_stats_reset(struct rte_eth_dev *dev);
 static void iavfbe_destroy_adapter(struct rte_eth_dev *dev);
 
 struct rte_iavf_emu_notify_ops iavfbe_notify_ops = {
@@ -64,17 +67,80 @@ static const struct eth_dev_ops iavfbe_eth_dev_ops = {
        .dev_start                  = iavfbe_dev_start,
        .dev_stop                   = iavfbe_dev_stop,
        .dev_infos_get              = iavfbe_dev_info_get,
+       .rx_queue_setup             = iavfbe_dev_rx_queue_setup,
+       .tx_queue_setup             = iavfbe_dev_tx_queue_setup,
+       .rx_queue_release           = iavfbe_dev_rx_queue_release,
+       .tx_queue_release           = iavfbe_dev_tx_queue_release,
+       .rxq_info_get               = iavfbe_dev_rxq_info_get,
+       .txq_info_get               = iavfbe_dev_txq_info_get,
        .link_update                = iavfbe_dev_link_update,
+       .stats_get                  = iavfbe_stats_get,
+       .stats_reset                = iavfbe_stats_reset,
 };
 
 static int
-iavfbe_dev_info_get(struct rte_eth_dev *dev  __rte_unused,
-                   struct rte_eth_dev_info *dev_info)
+iavfbe_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-       dev_info->max_rx_queues = 0;
-       dev_info->max_tx_queues = 0;
-       dev_info->min_rx_bufsize = 0;
-       dev_info->max_rx_pktlen = 0;
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       uint64_t tx_pkts = 0;
+       uint64_t tx_bytes = 0;
+       uint64_t tx_missed = 0;
+       uint64_t rx_pkts = 0;
+       uint64_t rx_bytes = 0;
+       int i;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               if (rxq == NULL)
+                       continue;
+               rx_pkts += stats_update(rxq->stats_off.recv_pkt_num,
+                                       rxq->stats.recv_pkt_num);
+               rx_bytes += stats_update(rxq->stats_off.recv_bytes,
+                                        rxq->stats.recv_bytes);
+       }
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               if (txq == NULL)
+                       continue;
+               tx_pkts += stats_update(txq->stats_off.sent_pkt_num,
+                                       txq->stats.sent_pkt_num);
+               tx_bytes += stats_update(txq->stats_off.sent_bytes,
+                                        txq->stats.sent_bytes);
+               tx_missed += stats_update(txq->stats_off.sent_miss_num,
+                                         txq->stats.sent_miss_num);
+       }
+
+       stats->ipackets = rx_pkts;
+       stats->opackets = tx_pkts;
+       stats->oerrors = tx_missed;
+       stats->ibytes = rx_bytes;
+       stats->obytes = tx_bytes;
+
+       return 0;
+}
+
+static int
+iavfbe_stats_reset(struct rte_eth_dev *dev)
+{
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       unsigned int i;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               if (rxq == NULL)
+                       continue;
+               rxq->stats_off = rxq->stats;
+       }
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               if (txq == NULL)
+                       continue;
+               txq->stats_off = txq->stats;
+       }
 
        return 0;
 }
@@ -86,6 +152,84 @@ iavfbe_dev_configure(struct rte_eth_dev *dev __rte_unused)
        return 0;
 }
 
+static int
+iavfbe_start_queues(struct rte_eth_dev *dev)
+{
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       uint32_t i;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               if (!txq || rte_atomic32_read(&txq->enable) != 0)
+                       continue;
+               dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+       }
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               if (!rxq || rte_atomic32_read(&rxq->enable) != 0)
+                       continue;
+               dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+       }
+
+       return 0;
+}
+
+static void
+iavfbe_stop_queues(struct rte_eth_dev *dev)
+{
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       int i;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               if (!txq)
+                       continue;
+               dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+       }
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               if (!rxq)
+                       continue;
+               dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+       }
+}
+
+static int
+iavfbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+       struct iavfbe_adapter *adapter =
+               IAVFBE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+       dev_info->max_rx_queues = adapter->nb_qps;
+       dev_info->max_tx_queues = adapter->nb_qps;
+       dev_info->min_rx_bufsize = IAVF_BE_BUF_SIZE_MIN;
+       dev_info->max_rx_pktlen = IAVF_BE_FRAME_SIZE_MAX;
+       dev_info->max_mac_addrs = IAVF_BE_NUM_MACADDR_MAX;
+
+       dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+               .nb_max = IAVF_BE_MAX_RING_DESC,
+               .nb_min = IAVF_BE_MIN_RING_DESC,
+               .nb_align = IAVF_BE_ALIGN_RING_DESC,
+       };
+
+       dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+               .nb_max = IAVF_BE_MAX_RING_DESC,
+               .nb_min = IAVF_BE_MIN_RING_DESC,
+               .nb_align = IAVF_BE_ALIGN_RING_DESC,
+       };
+
+       dev_info->rx_offload_capa =
+               DEV_RX_OFFLOAD_SCATTER |
+               DEV_RX_OFFLOAD_JUMBO_FRAME;
+       dev_info->tx_offload_capa =
+               DEV_TX_OFFLOAD_MULTI_SEGS;
+
+       return 0;
+}
+
 static int
 iavfbe_dev_start(struct rte_eth_dev *dev)
 {
@@ -94,6 +238,8 @@ iavfbe_dev_start(struct rte_eth_dev *dev)
 
        adapter->adapter_stopped = 0;
 
+       iavfbe_start_queues(dev);
+
        return 0;
 }
 
@@ -106,6 +252,8 @@ iavfbe_dev_stop(struct rte_eth_dev *dev)
        if (adapter->adapter_stopped == 1)
                return 0;
 
+       iavfbe_stop_queues(dev);
+
        adapter->adapter_stopped = 1;
 
        return 0;
@@ -133,6 +281,13 @@ iavfbe_dev_link_update(struct rte_eth_dev *dev,
 static int
 iavfbe_dev_close(struct rte_eth_dev *dev)
 {
+       struct iavfbe_adapter *adapter =
+               IAVFBE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+       /* Only send event when the emudev is alive */
+       if (adapter->started && adapter->cq_info.arq.len)
+               iavfbe_notify_vf_reset(adapter);
+
        iavfbe_destroy_adapter(dev);
        rte_eth_dev_release_port(dev);
 
@@ -199,7 +354,8 @@ iavfbe_new_device(struct rte_emudev *dev)
                }
        }
 
-       /* Lan queue info would be set when queue setup */
+       /* Only reset LAN queues if already set up;
+        * other info is set at queue setup.
+        */
+       iavfbe_reset_all_queues(adapter);
 
        if (rte_emudev_get_attr(dev->dev_id, RTE_IAVF_EMU_ATTR_ASQ_HEAD,
                (rte_emudev_attr_t)&addr)) {
@@ -236,8 +392,28 @@ iavfbe_destroy_device(struct rte_emudev *dev)
 {
        struct iavfbe_adapter *adapter =
                (struct iavfbe_adapter *)dev->backend_priv;
+       struct rte_eth_dev_data *data = adapter->eth_dev->data;
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       uint16_t i;
 
-       /* TODO: Disable all lan queues */
+       /* Disable all queues */
+       for (i = 0; i < data->nb_rx_queues; i++) {
+               rxq = data->rx_queues[i];
+               if (!rxq)
+                       continue;
+               rte_atomic32_set(&rxq->enable, false);
+               rxq->q_set = false;
+       }
+
+       for (i = 0; i < data->nb_tx_queues; i++) {
+               txq = data->tx_queues[i];
+               if (!txq)
+                       continue;
+               rte_atomic32_set(&txq->enable, false);
+               txq->q_set = false;
+       }
+       adapter->started = 0;
 
        /* update link status */
        adapter->link_up = false;
@@ -249,9 +425,13 @@ iavfbe_update_device(struct rte_emudev *dev)
 {
        struct iavfbe_adapter *adapter =
                (struct iavfbe_adapter *)dev->backend_priv;
+       struct rte_eth_dev_data *data = adapter->eth_dev->data;
        struct rte_iavf_emu_mem **mem = &(adapter->mem_table);
        struct rte_emudev_q_info q_info;
        struct rte_emudev_irq_info irq_info;
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       uint16_t i;
 
        if (rte_emudev_get_mem_table(dev->dev_id, (void **)mem)) {
                IAVF_BE_LOG(ERR, "Can not get mem table\n");
@@ -271,10 +451,87 @@ iavfbe_update_device(struct rte_emudev *dev)
                return -1;
        }
 
-       /* TODO: Lan queue info update */
        adapter->cq_irqfd = irq_info.eventfd;
        rte_atomic32_set(&adapter->irq_enable, irq_info.enable);
 
+       for (i = 0; i < data->nb_rx_queues; i++) {
+               rxq = data->rx_queues[i];
+               if (!rxq || rxq->vector == -1)
+                       continue;
+
+               if (rte_emudev_get_irq_info(dev->dev_id,
+                       rxq->vector, &irq_info)) {
+                       IAVF_BE_LOG(ERR,
+                               "Can not get irq info of rxq %d\n", i);
+                       return -1;
+               }
+               rte_atomic32_set(&rxq->irq_enable, irq_info.enable);
+       }
+
+       for (i = 0; i < data->nb_tx_queues; i++) {
+               txq = data->tx_queues[i];
+               if (!txq || txq->vector == -1)
+                       continue;
+
+               if (rte_emudev_get_irq_info(dev->dev_id,
+                       txq->vector, &irq_info)) {
+                       IAVF_BE_LOG(ERR,
+                               "Can not get irq info of txq %d\n", i);
+                       return -1;
+               }
+               rte_atomic32_set(&txq->irq_enable, irq_info.enable);
+       }
+
+       return 0;
+}
+
+int
+iavfbe_lock_lanq(struct iavfbe_adapter *adapter)
+{
+       struct rte_eth_dev *eth_dev = adapter->eth_dev;
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       uint16_t i;
+
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+               rxq = eth_dev->data->rx_queues[i];
+               if (!rxq)
+                       continue;
+               rte_spinlock_lock(&rxq->access_lock);
+       }
+
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+               txq = eth_dev->data->tx_queues[i];
+               if (!txq)
+                       continue;
+               rte_spinlock_lock(&txq->access_lock);
+       }
+
+       return 0;
+}
+
+int
+iavfbe_unlock_lanq(struct iavfbe_adapter *adapter)
+{
+       struct rte_eth_dev *eth_dev = adapter->eth_dev;
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       uint16_t i;
+
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+               rxq = eth_dev->data->rx_queues[i];
+               if (!rxq)
+                       continue;
+               rte_spinlock_unlock(&rxq->access_lock);
+       }
+
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+               txq = eth_dev->data->tx_queues[i];
+               if (!txq)
+                       continue;
+               rte_spinlock_unlock(&txq->access_lock);
+       }
+
        return 0;
 }
 
@@ -287,11 +544,11 @@ iavfbe_lock_dp(struct rte_emudev *dev, int lock)
        /* Acquire/Release lock of control queue and lan queue */
 
        if (lock) {
-               /* TODO: Lan queue lock */
+               iavfbe_lock_lanq(adapter);
                rte_spinlock_lock(&adapter->cq_info.asq.access_lock);
                rte_spinlock_lock(&adapter->cq_info.arq.access_lock);
        } else {
-               /* TODO: Lan queue unlock */
+               iavfbe_unlock_lanq(adapter);
                rte_spinlock_unlock(&adapter->cq_info.asq.access_lock);
                rte_spinlock_unlock(&adapter->cq_info.arq.access_lock);
        }
@@ -358,11 +615,16 @@ iavfbe_reset_device(struct rte_emudev *dev)
        struct iavfbe_adapter *adapter =
                (struct iavfbe_adapter *)dev->backend_priv;
 
+       iavfbe_notify_vf_reset(adapter);
+
        /* Lock has been acquired by lock_dp */
-       /* TODO: reset all queues */
+       iavfbe_reset_all_queues(adapter);
        iavfbe_reset_asq(adapter, false);
        iavfbe_reset_arq(adapter, false);
 
+       memset(adapter->qps, 0, adapter->nb_qps * sizeof(adapter->qps[0]));
+       memset(&adapter->eth_stats, 0, sizeof(struct virtchnl_eth_stats));
+       adapter->nb_used_qps = 0;
        adapter->link_up = 0;
        adapter->unicast_promisc = true;
        adapter->multicast_promisc = true;
@@ -433,13 +695,14 @@ iavfbe_init_adapter(struct rte_eth_dev *eth_dev,
 {
        struct iavfbe_adapter *adapter;
        struct rte_iavf_emu_config *conf;
-       int ret;
+       int bufsz, ret;
 
        adapter = IAVFBE_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
 
        adapter->eth_dev = eth_dev;
        adapter->emu_dev = emu_dev;
        adapter->edev_id = emu_dev->dev_id;
+       adapter->cq_irqfd = IAVF_BE_INVALID_FD;
        emu_dev->backend_priv = (void *)adapter;
        rte_wmb();
 
@@ -472,6 +735,48 @@ iavfbe_init_adapter(struct rte_eth_dev *eth_dev,
        rte_spinlock_init(&adapter->cq_info.asq.access_lock);
        rte_spinlock_init(&adapter->cq_info.arq.access_lock);
 
+       /* Set VF Backend defaults during initialization */
+       adapter->virtchnl_version.major = VIRTCHNL_VERSION_MAJOR;
+       adapter->virtchnl_version.minor = VIRTCHNL_VERSION_MINOR;
+
+       bufsz = sizeof(struct virtchnl_vf_resource) +
+               (IAVF_BE_DEFAULT_VSI_NUM *
+                sizeof(struct virtchnl_vsi_resource));
+       adapter->vf_res = rte_zmalloc_socket("iavfbe", bufsz, 0,
+                                            eth_dev->device->numa_node);
+       if (!adapter->vf_res) {
+               IAVF_BE_LOG(ERR, "Fail to allocate vf_res memory");
+               ret = -ENOMEM;
+               goto err_res;
+       }
+
+       adapter->vf_res->num_vsis = IAVF_BE_DEFAULT_VSI_NUM;
+       adapter->vf_res->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
+                                       VIRTCHNL_VF_OFFLOAD_VLAN |
+                                       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
+                                       VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+       adapter->vf_res->max_vectors = IAVF_BE_MAX_VECTORS;
+       adapter->vf_res->num_queue_pairs = adapter->nb_qps;
+       adapter->vf_res->max_mtu = AVF_DEFAULT_MAX_MTU;
+       /* Make vsi_id change with different emu devices */
+       adapter->vf_res->vsi_res[0].vsi_id = emu_dev->dev_id;
+       adapter->vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+       adapter->vf_res->vsi_res[0].num_queue_pairs = adapter->nb_qps;
+       rte_ether_addr_copy(ether_addr,
+               (struct rte_ether_addr *)
+               adapter->vf_res->vsi_res[0].default_mac_addr);
+
+       adapter->qps =
+               rte_zmalloc_socket("iavfbe",
+                                  adapter->nb_qps * sizeof(adapter->qps[0]),
+                                  0,
+                                  eth_dev->device->numa_node);
+       if (!adapter->qps) {
+               IAVF_BE_LOG(ERR, "fail to allocate memeory for queue info");
+               ret = -ENOMEM;
+               goto err_qps;
+       }
+
        adapter->unicast_promisc = true;
        adapter->multicast_promisc = true;
        adapter->vlan_filter = false;
@@ -494,6 +799,11 @@ iavfbe_init_adapter(struct rte_eth_dev *eth_dev,
        return 0;
 
 err_thread:
+       rte_free(adapter->qps);
+err_qps:
+       rte_free(adapter->vf_res);
+err_res:
+       rte_free(adapter->cq_info.asq.aq_req);
 err_aq:
 err_info:
        rte_free(conf);
@@ -513,6 +823,9 @@ iavfbe_destroy_adapter(struct rte_eth_dev *dev)
        }
 
        rte_free(adapter->dev_info.dev_priv);
+       rte_free(adapter->cq_info.asq.aq_req);
+       rte_free(adapter->vf_res);
+       rte_free(adapter->qps);
 }
 
 static int
diff --git a/drivers/net/iavf_be/iavf_be_rxtx.c b/drivers/net/iavf_be/iavf_be_rxtx.c
new file mode 100644
index 0000000000..dd275b80c6
--- /dev/null
+++ b/drivers/net/iavf_be/iavf_be_rxtx.c
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_string_fns.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_net.h>
+#include <rte_iavf_emu.h>
+
+#include <iavf_type.h>
+#include <virtchnl.h>
+#include "iavf_be.h"
+#include "iavf_be_rxtx.h"
+
+int
+iavfbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                         uint16_t nb_desc __rte_unused,
+                         unsigned int socket_id,
+                         const struct rte_eth_rxconf *rx_conf __rte_unused,
+                         struct rte_mempool *mp)
+{
+       struct iavfbe_adapter *ad =
+               IAVFBE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct iavfbe_rx_queue *rxq;
+       uint16_t len;
+
+       /* Free memory if needed */
+       if (dev->data->rx_queues[queue_idx]) {
+               iavfbe_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               dev->data->rx_queues[queue_idx] = NULL;
+       }
+
+       /* Allocate the rx queue data structure */
+       rxq = rte_zmalloc_socket("iavfbe rxq",
+                                sizeof(struct iavfbe_rx_queue),
+                                RTE_CACHE_LINE_SIZE,
+                                socket_id);
+       if (!rxq) {
+               IAVF_BE_LOG(ERR, "Failed to allocate memory for "
+                                "rx queue data structure");
+               return -ENOMEM;
+       }
+
+       rxq->mp = mp;
+       rxq->nb_rx_desc = 0; /* Update when queue from fe is ready */
+       rxq->queue_id = queue_idx;
+       rxq->port_id = dev->data->port_id;
+       rxq->rx_hdr_len = 0;
+       rxq->vector = IAVF_BE_INVALID_VECTOR;
+       rxq->kickfd = IAVF_BE_INVALID_FD;
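+       /* Buffer length comes from the mempool's data room, aligned to the
+        * 128B (1 << AVF_RXQ_CTX_DBUFF_SHIFT) granularity of the queue context.
+        */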
+       len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+       rxq->rx_buf_len = RTE_ALIGN(len, (1 << AVF_RXQ_CTX_DBUFF_SHIFT));
+
+       /* More ring info will be obtained via virtchnl messages */
+
+       rxq->adapter = (void *)ad;
+       dev->data->rx_queues[queue_idx] = rxq;
+
+       return 0;
+}
+
+int
+iavfbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                         uint16_t nb_desc __rte_unused,
+                         unsigned int socket_id,
+                         const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+       struct iavfbe_adapter *ad =
+               IAVFBE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct iavfbe_tx_queue *txq;
+
+       /* Free memory if needed. */
+       if (dev->data->tx_queues[queue_idx]) {
+               iavfbe_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               dev->data->tx_queues[queue_idx] = NULL;
+       }
+
+       /* Allocate the TX queue data structure. */
+       txq = rte_zmalloc_socket("iavfbe txq",
+                                sizeof(struct iavfbe_tx_queue),
+                                RTE_CACHE_LINE_SIZE,
+                                socket_id);
+       if (!txq) {
+               IAVF_BE_LOG(ERR, "Failed to allocate memory for "
+                                "tx queue structure");
+               return -ENOMEM;
+       }
+
+       txq->queue_id = queue_idx;
+       txq->port_id = dev->data->port_id;
+       txq->vector = IAVF_BE_INVALID_VECTOR;
+       txq->callfd = IAVF_BE_INVALID_FD;
+
+       /* More ring info will be obtained via virtchnl messages */
+
+       txq->adapter = (void *)ad;
+       dev->data->tx_queues[queue_idx] = txq;
+
+       return 0;
+}
+
+void
+iavfbe_dev_rx_queue_release(void *rxq)
+{
+       struct iavfbe_rx_queue *q = (struct iavfbe_rx_queue *)rxq;
+
+       if (!q)
+               return;
+       rte_free(q);
+}
+
+void
+iavfbe_dev_tx_queue_release(void *txq)
+{
+       struct iavfbe_tx_queue *q = (struct iavfbe_tx_queue *)txq;
+
+       if (!q)
+               return;
+       rte_free(q);
+}
+
+void
+iavfbe_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+                    struct rte_eth_rxq_info *qinfo)
+{
+       struct iavfbe_rx_queue *rxq;
+
+       rxq = dev->data->rx_queues[queue_id];
+       if (!rxq)
+               return;
+
+       qinfo->mp = rxq->mp;
+       qinfo->scattered_rx = true;
+       qinfo->nb_desc = rxq->nb_rx_desc;
+
+       qinfo->conf.rx_free_thresh = 0;
+       qinfo->conf.rx_drop_en = false;
+       qinfo->conf.rx_deferred_start = false;
+}
+
+void
+iavfbe_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+                    struct rte_eth_txq_info *qinfo)
+{
+       struct iavfbe_tx_queue *txq;
+
+       txq = dev->data->tx_queues[queue_id];
+
+       if (!txq)
+               return;
+
+       qinfo->nb_desc = txq->nb_tx_desc;
+
+       qinfo->conf.tx_free_thresh = 0;
+       qinfo->conf.tx_rs_thresh = 0;
+       qinfo->conf.offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+       qinfo->conf.tx_deferred_start = false;
+}
diff --git a/drivers/net/iavf_be/iavf_be_rxtx.h b/drivers/net/iavf_be/iavf_be_rxtx.h
new file mode 100644
index 0000000000..cc72769337
--- /dev/null
+++ b/drivers/net/iavf_be/iavf_be_rxtx.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _AVF_BE_RXTX_H_
+#define _AVF_BE_RXTX_H_
+
+/* Ring length must be a whole multiple of 32 descriptors. */
+#define IAVF_BE_ALIGN_RING_DESC      32
+#define IAVF_BE_MIN_RING_DESC        64
+#define IAVF_BE_MAX_RING_DESC        4096
+
+#define AVF_RXQ_CTX_DBUFF_SHIFT 7
+#define AVF_RXQ_CTX_HBUFF_SHIFT 6
+
+#define AVF_RX_MAX_SEG           5
+#define IAVF_BE_INVALID_FD      -1
+#define IAVF_BE_INVALID_VECTOR  -1
+
+#define iavf_rx_desc iavf_32byte_rx_desc
+
+/* Structure associated with each Rx queue in AVF_BE. */
+struct iavfbe_rx_queue {
+       rte_spinlock_t access_lock;
+       struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
+       volatile struct iavf_tx_desc *tx_ring; /* AVF Tx ring virtual address */
+       uint64_t tx_ring_phys_addr;   /* AVF Tx ring DMA address */
+       uint16_t nb_rx_desc;          /* ring length */
+       volatile uint8_t *qtx_tail;   /* register address of tail */
+
+       uint16_t tx_head;
+       int vector;
+       int kickfd;
+       rte_atomic32_t irq_enable;
+
+       uint16_t port_id;       /* device port ID */
+       uint8_t crc_len;        /* 0 if CRC stripped, 4 otherwise */
+       uint16_t queue_id;      /* Rx queue index */
+       uint16_t rx_buf_len;    /* The packet buffer size */
+       uint16_t rx_hdr_len;    /* The header buffer size */
+       uint16_t max_pkt_len;   /* Maximum packet length */
+       bool q_set;             /* If queue has been set by virtchnl */
+       rte_atomic32_t enable;  /* Whether queue has been enabled by virtchnl */
+
+       struct iavfbe_adapter *adapter; /* Adapter this Rx queue belongs to */
+       struct {
+               uint64_t recv_pkt_num;
+               uint64_t recv_bytes;
+               uint64_t recv_miss_num;
+               uint64_t recv_multi_num;
+               uint64_t recv_broad_num;
+       } stats, stats_off;   /* Stats information */
+};
+
+/* Structure associated with each TX queue. */
+struct iavfbe_tx_queue {
+       rte_spinlock_t access_lock;
+       volatile union iavf_rx_desc *rx_ring; /* AVF Rx ring virtual address */
+       uint64_t rx_ring_phys_addr;    /* Rx ring DMA address */
+       uint16_t nb_tx_desc;           /* ring length */
+       volatile uint8_t *qrx_tail;    /* tail address of fe's rx ring */
+       uint32_t buffer_size;          /* max buffer size of fe's rx ring */
+       uint32_t max_pkt_size;         /* max packet size of fe's rx ring */
+
+       uint16_t rx_head;
+       int vector;
+       int callfd;
+       rte_atomic32_t irq_enable;
+
+       uint16_t port_id;
+       uint16_t queue_id;
+
+       bool q_set;             /* If queue has been set by virtchnl */
+       rte_atomic32_t enable;  /* Whether queue has been enabled by virtchnl */
+
+       struct iavfbe_adapter *adapter; /* Adapter this Tx queue belongs to */
+       struct {
+               uint64_t sent_pkt_num;
+               uint64_t sent_bytes;
+               uint64_t sent_miss_num;
+               uint64_t sent_multi_num;
+               uint64_t sent_broad_num;
+       } stats, stats_off;   /* Stats information */
+};
+
+int iavfbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
+                             uint16_t queue_idx,
+                             uint16_t nb_desc,
+                             unsigned int socket_id,
+                             const struct rte_eth_rxconf *rx_conf,
+                             struct rte_mempool *mp);
+void iavfbe_dev_rx_queue_release(void *rxq);
+int iavfbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+                             uint16_t queue_idx,
+                             uint16_t nb_desc,
+                             unsigned int socket_id,
+                             const struct rte_eth_txconf *tx_conf);
+void iavfbe_dev_tx_queue_release(void *txq);
+void iavfbe_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+                            struct rte_eth_rxq_info *qinfo);
+void iavfbe_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+                            struct rte_eth_txq_info *qinfo);
+
+#endif /* _AVF_BE_RXTX_H_ */
diff --git a/drivers/net/iavf_be/iavf_be_vchnl.c b/drivers/net/iavf_be/iavf_be_vchnl.c
index 56b8a485a5..2195047280 100644
--- a/drivers/net/iavf_be/iavf_be_vchnl.c
+++ b/drivers/net/iavf_be/iavf_be_vchnl.c
@@ -21,6 +21,7 @@
 #include <virtchnl.h>
 
 #include "iavf_be.h"
+#include "iavf_be_rxtx.h"
 
 static inline void
 iavfbe_notify(struct iavfbe_adapter *adapter)
@@ -34,7 +35,92 @@ iavfbe_notify(struct iavfbe_adapter *adapter)
                                        strerror(errno));
 }
 
-__rte_unused  static int
+static inline void
+reset_rxq_stats(struct iavfbe_rx_queue *rxq)
+{
+       rxq->stats.recv_pkt_num = 0;
+       rxq->stats.recv_bytes = 0;
+       rxq->stats.recv_miss_num = 0;
+       rxq->stats.recv_multi_num = 0;
+       rxq->stats.recv_broad_num = 0;
+
+       rxq->stats_off.recv_pkt_num = 0;
+       rxq->stats_off.recv_bytes = 0;
+       rxq->stats_off.recv_miss_num = 0;
+       rxq->stats_off.recv_multi_num = 0;
+       rxq->stats_off.recv_broad_num = 0;
+}
+
+static inline void
+reset_txq_stats(struct iavfbe_tx_queue *txq)
+{
+       txq->stats.sent_pkt_num = 0;
+       txq->stats.sent_bytes = 0;
+       txq->stats.sent_miss_num = 0;
+       txq->stats.sent_multi_num = 0;
+       txq->stats.sent_broad_num = 0;
+}
+
+void
+iavfbe_reset_all_queues(struct iavfbe_adapter *adapter)
+{
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       uint16_t i;
+
+       /* Disable queues and mark them unset */
+       for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+               rxq = adapter->eth_dev->data->rx_queues[i];
+               if (rxq) {
+                       rte_atomic32_set(&rxq->enable, false);
+                       rxq->q_set = false;
+                       rxq->tx_head = 0;
+                       reset_rxq_stats(rxq);
+               }
+       }
+
+       for (i = 0; i < adapter->eth_dev->data->nb_tx_queues; i++) {
+               txq = adapter->eth_dev->data->tx_queues[i];
+               if (txq) {
+                       rte_atomic32_set(&txq->enable, false);
+                       txq->q_set = false;
+                       txq->rx_head = 0;
+                       reset_txq_stats(txq);
+               }
+       }
+}
+
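+/*
+ * Bind a queue to a vector: look up the vector's irq info in the emulated
+ * device and cache the eventfd used to signal the front end.
+ */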
+static enum iavf_status
+apply_tx_irq(struct iavfbe_tx_queue *txq, uint16_t vector)
+{
+       struct rte_emudev_irq_info info;
+
+       txq->vector = vector;
+       if (rte_emudev_get_irq_info(txq->adapter->edev_id, vector, &info)) {
+               IAVF_BE_LOG(ERR, "Can not get irq info\n");
+               return IAVF_ERR_DEVICE_NOT_SUPPORTED;
+       }
+       txq->callfd = info.eventfd;
+
+       return 0;
+}
+
+static enum iavf_status
+apply_rx_irq(struct iavfbe_rx_queue *rxq, uint16_t vector)
+{
+       struct rte_emudev_irq_info info;
+
+       rxq->vector = vector;
+       if (rte_emudev_get_irq_info(rxq->adapter->edev_id, vector, &info)) {
+               IAVF_BE_LOG(ERR, "Can not get irq info\n");
+               return IAVF_ERR_DEVICE_NOT_SUPPORTED;
+       }
+       rxq->kickfd = info.eventfd;
+
+       return 0;
+}
+
+static int
 iavfbe_send_msg_to_vf(struct iavfbe_adapter *adapter,
                        uint32_t opcode,
                        uint32_t retval,
@@ -107,6 +193,459 @@ iavfbe_send_msg_to_vf(struct iavfbe_adapter *adapter,
        return status;
 }
 
+static void
+iavfbe_process_cmd_version(struct iavfbe_adapter *adapter,
+                               uint8_t *msg)
+{
+       struct virtchnl_version_info *info =
+               (struct virtchnl_version_info *)msg;
+
+       /* Only support V1.1 */
+       if (adapter->virtchnl_version.major == info->major &&
+           adapter->virtchnl_version.minor == info->minor)
+               iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_VERSION,
+                                     VIRTCHNL_STATUS_SUCCESS,
+                                     (uint8_t *)&adapter->virtchnl_version,
+                                     sizeof(adapter->virtchnl_version));
+       else
+               iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_VERSION,
+                                     VIRTCHNL_STATUS_NOT_SUPPORTED,
+                                     NULL, 0);
+}
+
+static int
+iavfbe_renew_device_info(struct iavfbe_adapter *adapter)
+{
+       struct rte_iavf_emu_mem **mem = &(adapter->mem_table);
+       uint64_t addr;
+
+       if (rte_emudev_get_mem_table(adapter->edev_id, (void **)mem)) {
+               IAVF_BE_LOG(ERR, "Can not get mem table\n");
+               return -1;
+       }
+
+       if (rte_emudev_get_attr(adapter->edev_id, RTE_IAVF_EMU_ATTR_RESET,
+               (rte_emudev_attr_t)&addr)) {
+               IAVF_BE_LOG(ERR, "Can not get arq head\n");
+               return -1;
+       }
+       adapter->reset = (uint8_t *)(uintptr_t)addr;
+
+       IAVF_BE_LOG(DEBUG, "DEVICE memtable re-acquired, %p\n",
+                   adapter->mem_table);
+
+       return 0;
+}
+
+static int
+iavfbe_process_cmd_reset_vf(struct iavfbe_adapter *adapter)
+{
+       adapter->started = 0;
+       IAVFBE_WRITE_32(adapter->reset, RTE_IAVF_EMU_RESET_IN_PROGRESS);
+
+       iavfbe_lock_lanq(adapter);
+       iavfbe_reset_all_queues(adapter);
+       iavfbe_unlock_lanq(adapter);
+
+       memset(adapter->qps, 0, adapter->nb_qps * sizeof(adapter->qps[0]));
+       memset(&adapter->eth_stats, 0, sizeof(struct virtchnl_eth_stats));
+       adapter->nb_used_qps = 0;
+       adapter->link_up = 0;
+       adapter->unicast_promisc = true;
+       adapter->multicast_promisc = true;
+       adapter->vlan_filter = false;
+       adapter->vlan_strip = false;
+       adapter->adapter_stopped = 1;
+
+       iavfbe_renew_device_info(adapter);
+       IAVFBE_WRITE_32(adapter->reset, RTE_IAVF_EMU_RESET_COMPLETED);
+       adapter->started = 1;
+
+       return IAVF_SUCCESS;
+}
+
+static int
+iavfbe_process_cmd_get_vf_resource(struct iavfbe_adapter *adapter,
+                               uint8_t *msg)
+{
+       struct virtchnl_vf_resource vf_res;
+       uint32_t request_caps;
+       uint32_t len = 0;
+
+       len = sizeof(struct virtchnl_vf_resource) +
+               (adapter->vf_res->num_vsis - 1) *
+               sizeof(struct virtchnl_vsi_resource);
+
+       request_caps = *(uint32_t *)msg;
+
+       rte_memcpy(&vf_res, adapter->vf_res, len);
+       vf_res.vf_cap_flags = request_caps &
+                               adapter->vf_res->vf_cap_flags;
+
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
+                             VIRTCHNL_STATUS_SUCCESS, (uint8_t *)&vf_res, len);
+
+       return IAVF_SUCCESS;
+}
+
+static int
+iavfbe_process_cmd_config_vsi_queues(struct iavfbe_adapter *adapter,
+                                    uint8_t *msg,
+                                    uint16_t msglen __rte_unused)
+{
+       struct virtchnl_vsi_queue_config_info *vc_vqci =
+               (struct virtchnl_vsi_queue_config_info *)msg;
+       struct virtchnl_queue_pair_info *vc_qpi;
+       struct rte_eth_dev *dev = adapter->eth_dev;
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       uint16_t nb_qps, queue_id;
+       int i, ret = VIRTCHNL_STATUS_SUCCESS;
+
+       if (!msg) {
+               ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto send_msg;
+       }
+
+       nb_qps = vc_vqci->num_queue_pairs;
+       vc_qpi = vc_vqci->qpair;
+
+       /* Check valid */
+       if (nb_qps > adapter->nb_qps ||
+           nb_qps > dev->data->nb_rx_queues ||
+           nb_qps > dev->data->nb_tx_queues) {
+               IAVF_BE_LOG(ERR, "number of queue pairs (%u) exceeds"
+                           " (max: %u, rxq: %u, txq: %u)", nb_qps,
+                           adapter->nb_qps, dev->data->nb_rx_queues,
+                           dev->data->nb_tx_queues);
+               ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto send_msg;
+       }
+
+       for (i = 0; i < nb_qps; i++) {
+               if (vc_qpi[i].txq.vsi_id != vc_vqci->vsi_id ||
+                   vc_qpi[i].rxq.vsi_id != vc_vqci->vsi_id ||
+                   vc_qpi[i].rxq.queue_id != vc_qpi[i].txq.queue_id ||
+                   vc_qpi[i].rxq.queue_id > adapter->nb_qps - 1 ||
+                   vc_qpi[i].rxq.ring_len > IAVF_BE_MAX_RING_DESC ||
+                   vc_qpi[i].txq.ring_len > IAVF_BE_MAX_RING_DESC ||
+                   vc_vqci->vsi_id != adapter->vf_res->vsi_res[0].vsi_id) {
+                       ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       goto send_msg;
+               }
+       }
+
+       /* Store queues info internally */
+       adapter->nb_used_qps = nb_qps;
+       rte_memcpy(adapter->qps, &vc_vqci->qpair,
+                  nb_qps * sizeof(adapter->qps[0]));
+
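+       /* Doorbells follow the admin queues: pair i uses doorbell
+        * i * 2 + RTE_IAVF_EMU_ADMINQ_NUM for the FE Tx tail (BE Rx) and
+        * the next one for the FE Rx tail (BE Tx).
+        */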
+       for (i = 0; i < nb_qps; i++) {
+               struct rte_emudev_db_info db_info;
+
+               queue_id = adapter->qps[i].rxq.queue_id;
+               rxq = dev->data->rx_queues[queue_id];
+               txq = dev->data->tx_queues[queue_id];
+               if (!rxq || !txq) {
+                       IAVF_BE_LOG(ERR, "Queue Pair %u "
+                                   " hasn't been setup", rxq->queue_id);
+                       ret = VIRTCHNL_STATUS_NOT_SUPPORTED;
+                       goto send_msg;
+               }
+
+               memset(&db_info, 0, sizeof(db_info));
+               ret = rte_emudev_get_db_info(adapter->edev_id,
+                                         i * 2 + RTE_IAVF_EMU_ADMINQ_NUM,
+                                         &db_info);
+               if (ret ||
+                   (db_info.flag & RTE_EMUDEV_DB_MEM) != RTE_EMUDEV_DB_MEM) {
+                       IAVF_BE_LOG(ERR, "Fail to get Door Bell of RXQ %u",
+                                   rxq->queue_id);
+                       ret = VIRTCHNL_STATUS_NOT_SUPPORTED;
+                       goto send_msg;
+               }
+
+               rte_spinlock_lock(&rxq->access_lock);
+               /* Configure Rx Queue */
+               rxq->nb_rx_desc = vc_qpi[i].txq.ring_len;
+               rxq->tx_ring_phys_addr = vc_qpi[i].txq.dma_ring_addr;
+               rxq->max_pkt_len = vc_qpi[i].rxq.max_pkt_size;
+               rxq->qtx_tail = (uint8_t *)db_info.data.mem.base;
+               /* Reset stats */
+               reset_rxq_stats(rxq);
+               rxq->tx_head = 0;
+               rxq->q_set = true;
+               rte_spinlock_unlock(&rxq->access_lock);
+
+               memset(&db_info, 0, sizeof(db_info));
+               ret = rte_emudev_get_db_info(adapter->edev_id,
+                                         i * 2 + RTE_IAVF_EMU_ADMINQ_NUM + 1,
+                                         &db_info);
+               if (ret ||
+                   (db_info.flag & RTE_EMUDEV_DB_MEM) != RTE_EMUDEV_DB_MEM) {
+                       IAVF_BE_LOG(ERR, "Fail to get Door Bell of TXQ %u",
+                                   txq->queue_id);
+                       ret = VIRTCHNL_STATUS_NOT_SUPPORTED;
+                       goto send_msg;
+               }
+               rte_spinlock_lock(&txq->access_lock);
+               /* Configure Tx Queue */
+               txq->nb_tx_desc = vc_qpi[i].rxq.ring_len;
+               txq->rx_ring_phys_addr = vc_qpi[i].rxq.dma_ring_addr;
+               txq->buffer_size = vc_qpi[i].rxq.databuffer_size;
+               txq->max_pkt_size = vc_qpi[i].rxq.max_pkt_size;
+               txq->qrx_tail = (uint8_t *)db_info.data.mem.base;
+               /* Reset stats */
+               reset_txq_stats(txq);
+               txq->rx_head = 0;
+               txq->q_set = true;
+               rte_spinlock_unlock(&txq->access_lock);
+       }
+
+send_msg:
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+                             ret, NULL, 0);
+       return ret;
+}
+
+static int
+iavfbe_process_cmd_enable_queues(struct iavfbe_adapter *adapter,
+                                uint8_t *msg,
+                                uint16_t msglen __rte_unused)
+{
+       struct virtchnl_queue_select *q_sel =
+               (struct virtchnl_queue_select *)msg;
+       struct rte_eth_dev *dev = adapter->eth_dev;
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       int i, ret = VIRTCHNL_STATUS_SUCCESS;
+
+       if (!msg) {
+               ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto send_msg;
+       }
+
+       for (i = 0; i < adapter->nb_used_qps; i++) {
+               uint64_t len;
+
+               rxq = dev->data->rx_queues[i];
+               txq = dev->data->tx_queues[i];
+               if (!rxq || !txq) {
+                       IAVF_BE_LOG(ERR, "Queue Pair %u "
+                                   " hasn't been setup", rxq->queue_id);
+                       ret = IAVF_ERR_DEVICE_NOT_SUPPORTED;
+                       goto send_msg;
+               }
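+               /* Perspective swap: the FE's Tx ring is consumed by the BE Rx
+                * queue, and the FE's Rx ring is fed by the BE Tx queue.
+                */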
+               if (q_sel->tx_queues & (1 << i)) {
+                       if (!rxq->q_set) {
+                               IAVF_BE_LOG(ERR, "RXQ %u hasn't been setup", i);
+                               ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+                               goto send_msg;
+                       }
+                       rte_spinlock_lock(&rxq->access_lock);
+                       len = rxq->nb_rx_desc * sizeof(struct iavf_tx_desc);
+                       rxq->tx_ring = (void *)(uintptr_t)
+                               rte_iavf_emu_get_dma_vaddr(
+                                               adapter->mem_table,
+                                               rxq->tx_ring_phys_addr,
+                                               &len);
+                       rte_atomic32_set(&rxq->enable, true);
+                       rte_spinlock_unlock(&rxq->access_lock);
+               }
+               if (q_sel->rx_queues & (1 << i)) {
+                       if (!txq->q_set) {
+                               IAVF_BE_LOG(ERR, "TXQ %u hasn't been setup", i);
+                               ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+                               goto send_msg;
+                       }
+                       rte_spinlock_lock(&txq->access_lock);
+                       len = txq->nb_tx_desc * sizeof(union iavf_32byte_rx_desc);
+                       txq->rx_ring = (void *)(uintptr_t)
+                               rte_iavf_emu_get_dma_vaddr(adapter->mem_table,
+                                                      txq->rx_ring_phys_addr,
+                                                      &len);
+                       rte_atomic32_set(&txq->enable, true);
+                       rte_spinlock_unlock(&txq->access_lock);
+               }
+       }
+
+       /* Set link UP after queues are enabled */
+       adapter->link_up = true;
+       iavfbe_dev_link_update(adapter->eth_dev, 0);
+
+send_msg:
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_ENABLE_QUEUES, ret, NULL, 0);
+
+       return ret;
+}
+
+static int
+iavfbe_process_cmd_disable_queues(struct iavfbe_adapter *adapter,
+                                 uint8_t *msg,
+                                 uint16_t msglen __rte_unused)
+{
+       struct virtchnl_queue_select *q_sel =
+               (struct virtchnl_queue_select *)msg;
+       struct rte_eth_dev *dev = adapter->eth_dev;
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       int ret = VIRTCHNL_STATUS_SUCCESS;
+       uint16_t i;
+
+       if (!msg) {
+               ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto send_msg;
+       }
+
+       for (i = 0; i < adapter->nb_used_qps; i++) {
+               rxq = dev->data->rx_queues[i];
+               txq = dev->data->tx_queues[i];
+
+               if (q_sel->tx_queues & (1 << i)) {
+                       if (!rxq)
+                               continue;
+                       rte_spinlock_lock(&rxq->access_lock);
+                       rte_atomic32_set(&rxq->enable, false);
+                       rxq->tx_head = 0;
+                       reset_rxq_stats(rxq);
+                       rte_spinlock_unlock(&rxq->access_lock);
+               }
+               if (q_sel->rx_queues & (1 << i)) {
+                       if (!txq)
+                               continue;
+                       rte_spinlock_lock(&txq->access_lock);
+                       rte_atomic32_set(&txq->enable, false);
+                       txq->rx_head = 0;
+                       reset_txq_stats(txq);
+                       rte_spinlock_unlock(&txq->access_lock);
+               }
+       }
+send_msg:
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
+                             ret, NULL, 0);
+
+       return ret;
+}
+
+static int
+iavfbe_process_cmd_config_irq_map(struct iavfbe_adapter *adapter,
+                                 uint8_t *msg,
+                                 uint16_t msglen __rte_unused)
+{
+       struct rte_eth_dev *dev = adapter->eth_dev;
+       struct iavfbe_tx_queue *txq;
+       struct iavfbe_rx_queue *rxq;
+       uint16_t i, j, vector_id;
+       int ret = VIRTCHNL_STATUS_SUCCESS;
+
+       struct virtchnl_irq_map_info *irqmap =
+               (struct virtchnl_irq_map_info *)msg;
+       struct virtchnl_vector_map *map;
+
+       if (msg == NULL) {
+               ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto send_msg;
+       }
+
+       IAVF_BE_LOG(DEBUG, "irqmap->num_vectors = %d\n", irqmap->num_vectors);
+
+       for (i = 0; i < irqmap->num_vectors; i++) {
+               map = &irqmap->vecmap[i];
+               vector_id = map->vector_id;
+
+               for (j = 0; j < adapter->nb_used_qps; j++) {
+                       rxq = dev->data->rx_queues[j];
+                       txq = dev->data->tx_queues[j];
+
+                       if ((1 << j) & map->rxq_map) {
+                               ret = apply_tx_irq(txq, vector_id);
+                               if (ret)
+                                       goto send_msg;
+                       }
+                       if ((1 << j) & map->txq_map) {
+                               ret = apply_rx_irq(rxq, vector_id);
+                               if (ret)
+                                       goto send_msg;
+                       }
+               }
+       }
+
+send_msg:
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
+                             ret, NULL, 0);
+
+       return ret;
+}
+
+static int
+iavfbe_process_cmd_get_stats(struct iavfbe_adapter *adapter,
+                               uint8_t *msg __rte_unused,
+                               uint16_t msglen __rte_unused)
+{
+       struct iavfbe_rx_queue *rxq;
+       struct iavfbe_tx_queue *txq;
+       int i;
+
+       memset(&adapter->eth_stats, 0, sizeof(adapter->eth_stats));
+
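+       /* Stats are reported from the VF's perspective: BE Rx counters fill
+        * the tx_* fields and BE Tx counters fill the rx_* fields.
+        */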
+       for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+               rxq = adapter->eth_dev->data->rx_queues[i];
+               if (rxq == NULL)
+                       continue;
+               adapter->eth_stats.tx_broadcast += rxq->stats.recv_broad_num;
+               adapter->eth_stats.tx_bytes += rxq->stats.recv_bytes;
+               adapter->eth_stats.tx_discards += rxq->stats.recv_miss_num;
+               adapter->eth_stats.tx_multicast += rxq->stats.recv_multi_num;
+               adapter->eth_stats.tx_unicast += rxq->stats.recv_pkt_num -
+                                               rxq->stats.recv_broad_num -
+                                               rxq->stats.recv_multi_num;
+       }
+
+       for (i = 0; i < adapter->eth_dev->data->nb_tx_queues; i++) {
+               txq = adapter->eth_dev->data->tx_queues[i];
+               if (txq == NULL)
+                       continue;
+               adapter->eth_stats.rx_broadcast += txq->stats.sent_broad_num;
+               adapter->eth_stats.rx_bytes += txq->stats.sent_bytes;
+               /* Don't add discards, since the receive count doesn't include them */
+               adapter->eth_stats.rx_multicast += txq->stats.sent_multi_num;
+               adapter->eth_stats.rx_unicast += txq->stats.sent_pkt_num -
+                                               txq->stats.sent_broad_num -
+                                               txq->stats.sent_multi_num;
+       }
+
+       IAVF_BE_LOG(DEBUG, "rx_bytes:            %"PRIu64"",
+                                       adapter->eth_stats.tx_bytes);
+       IAVF_BE_LOG(DEBUG, "rx_unicast:          %"PRIu64"",
+                                       adapter->eth_stats.tx_unicast);
+       IAVF_BE_LOG(DEBUG, "rx_multicast:        %"PRIu64"",
+                                       adapter->eth_stats.tx_multicast);
+       IAVF_BE_LOG(DEBUG, "rx_broadcast:        %"PRIu64"",
+                                       adapter->eth_stats.tx_broadcast);
+       IAVF_BE_LOG(DEBUG, "rx_discards:         %"PRIu64"",
+                                       adapter->eth_stats.tx_discards);
+
+       IAVF_BE_LOG(DEBUG, "tx_bytes:            %"PRIu64"",
+                                       adapter->eth_stats.rx_bytes);
+       IAVF_BE_LOG(DEBUG, "tx_unicast:          %"PRIu64"",
+                                       adapter->eth_stats.rx_unicast);
+       IAVF_BE_LOG(DEBUG, "tx_multicast:        %"PRIu64"",
+                                       adapter->eth_stats.rx_multicast);
+       IAVF_BE_LOG(DEBUG, "tx_broadcast:        %"PRIu64"",
+                                       adapter->eth_stats.rx_broadcast);
+       IAVF_BE_LOG(DEBUG, "tx_discards:         %"PRIu64"",
+                                       adapter->eth_stats.rx_discards);
+
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_GET_STATS,
+                             VIRTCHNL_STATUS_SUCCESS,
+                             (uint8_t *)&adapter->eth_stats,
+                             sizeof(struct virtchnl_eth_stats));
+
+       return IAVF_SUCCESS;
+}
+
 /* Read data in admin queue to get msg from vf driver */
 static enum iavf_status
 iavfbe_read_msg_from_vf(struct iavfbe_adapter *adapter,
@@ -180,6 +719,289 @@ iavfbe_read_msg_from_vf(struct iavfbe_adapter *adapter,
        return ret;
 }
 
+static void
+iavfbe_notify_vf_link_status(struct iavfbe_adapter *adapter)
+{
+       struct virtchnl_pf_event event;
+
+       event.severity = PF_EVENT_SEVERITY_INFO;
+       event.event = VIRTCHNL_EVENT_LINK_CHANGE;
+       event.event_data.link_event.link_status = adapter->link_up ? 1 : 0;
+       event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_UNKNOWN;
+
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_EVENT,
+                               IAVF_SUCCESS, (uint8_t *)&event, sizeof(event));
+}
+
+void
+iavfbe_notify_vf_reset(struct iavfbe_adapter *adapter)
+{
+       struct virtchnl_pf_event event;
+
+       event.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+       event.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_EVENT,
+                               IAVF_SUCCESS, (uint8_t *)&event, sizeof(event));
+}
+
+static int
+iavfbe_process_cmd_enable_vlan_strip(struct iavfbe_adapter *adapter)
+{
+       adapter->vlan_strip = true;
+
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+                             VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+
+       return 0;
+}
+
+static int
+iavfbe_process_cmd_disable_vlan_strip(struct iavfbe_adapter *adapter)
+{
+       adapter->vlan_strip = false;
+
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+                             VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+
+       return 0;
+}
+
+static int
+iavfbe_process_cmd_config_promisc_mode(struct iavfbe_adapter *adapter,
+                               uint8_t *msg,
+                               uint16_t msglen __rte_unused)
+{
+       int ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_promisc_info *promisc =
+               (struct virtchnl_promisc_info *)msg;
+
+       if (msg == NULL) {
+               ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto send_msg;
+       }
+
+       adapter->unicast_promisc =
+               (promisc->flags & FLAG_VF_UNICAST_PROMISC) ? true : false;
+       adapter->multicast_promisc =
+               (promisc->flags & FLAG_VF_MULTICAST_PROMISC) ? true : false;
+
+send_msg:
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+                             ret, NULL, 0);
+       return ret;
+}
+
+static int
+iavfbe_process_cmd_add_ether_address(struct iavfbe_adapter *adapter,
+                                    uint8_t *msg,
+                                    uint16_t msglen __rte_unused)
+{
+       struct virtchnl_ether_addr_list *addr_list =
+               (struct virtchnl_ether_addr_list *)msg;
+       int ret = VIRTCHNL_STATUS_SUCCESS;
+       int i;
+
+       if (msg == NULL) {
+               ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto send_msg;
+       }
+
+       for (i = 0; i < addr_list->num_elements; i++) {
+               /* TODO: MAC filter hasn't been enabled yet */
+       }
+
+send_msg:
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
+                             ret, NULL, 0);
+       return ret;
+}
+
+static int
+iavfbe_process_cmd_del_ether_address(struct iavfbe_adapter *adapter,
+                                    uint8_t *msg,
+                                    uint16_t msglen __rte_unused)
+{
+       int ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_ether_addr_list *addr_list =
+               (struct virtchnl_ether_addr_list *)msg;
+       int i;
+
+       if (msg == NULL) {
+               ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto send_msg;
+       }
+
+       for (i = 0; i < addr_list->num_elements; i++) {
+               /* TODO: MAC filter hasn't been enabled yet */
+       }
+
+send_msg:
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
+                             ret, NULL, 0);
+       return ret;
+}
+
+static int
+iavfbe_process_cmd_add_vlan(struct iavfbe_adapter *adapter,
+                           uint8_t *msg, uint16_t msglen __rte_unused)
+{
+       int ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_vlan_filter_list *vlan_list =
+               (struct virtchnl_vlan_filter_list *)msg;
+       int i;
+
+       if (msg == NULL) {
+               ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto send_msg;
+       }
+
+       for (i = 0; i < vlan_list->num_elements; i++) {
+               /* TODO: VLAN filter hasn't been enabled yet */
+       }
+
+send_msg:
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_ADD_VLAN,
+                             ret, NULL, 0);
+       return ret;
+}
+
+static int
+iavfbe_process_cmd_del_vlan(struct iavfbe_adapter *adapter,
+                           uint8_t *msg,
+                           uint16_t msglen __rte_unused)
+{
+       int ret = VIRTCHNL_STATUS_SUCCESS;
+       struct virtchnl_vlan_filter_list *vlan_list =
+               (struct virtchnl_vlan_filter_list *)msg;
+       int i;
+
+       if (msg == NULL) {
+               ret = VIRTCHNL_STATUS_ERR_PARAM;
+               goto send_msg;
+       }
+
+       for (i = 0; i < vlan_list->num_elements; i++) {
+               /* TODO: VLAN filter hasn't been enabled yet */
+       }
+
+send_msg:
+       iavfbe_send_msg_to_vf(adapter, VIRTCHNL_OP_DEL_VLAN,
+                             ret, NULL, 0);
+       return ret;
+}
+
+static void
+iavfbe_execute_vf_cmd(struct iavfbe_adapter *adapter,
+                       struct iavf_arq_event_info *event)
+{
+       enum virtchnl_ops msg_opc;
+       int ret;
+
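+       /* The virtchnl opcode is carried in cookie_high of the AQ descriptor */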
+       msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
+               event->desc.cookie_high);
+       /* perform basic checks on the msg */
+       ret = virtchnl_vc_validate_vf_msg(&adapter->virtchnl_version, msg_opc,
+                                         event->msg_buf, event->msg_len);
+       if (ret) {
+               IAVF_BE_LOG(ERR, "Invalid message opcode %u, len %u",
+                           msg_opc, event->msg_len);
+               iavfbe_send_msg_to_vf(adapter, msg_opc,
+                                     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+                                     NULL, 0);
+               return;
+       }
+
+       switch (msg_opc) {
+       case VIRTCHNL_OP_VERSION:
+               IAVF_BE_LOG(INFO, "OP_VERSION received");
+               iavfbe_process_cmd_version(adapter, event->msg_buf);
+               break;
+       case VIRTCHNL_OP_RESET_VF:
+               IAVF_BE_LOG(INFO, "OP_RESET_VF received");
+               iavfbe_process_cmd_reset_vf(adapter);
+               break;
+       case VIRTCHNL_OP_GET_VF_RESOURCES:
+               IAVF_BE_LOG(INFO, "OP_GET_VF_RESOURCES received");
+               iavfbe_process_cmd_get_vf_resource(adapter, event->msg_buf);
+               break;
+       case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+               IAVF_BE_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
+               iavfbe_process_cmd_config_vsi_queues(adapter, event->msg_buf,
+                                                    event->msg_len);
+               break;
+       case VIRTCHNL_OP_ENABLE_QUEUES:
+               IAVF_BE_LOG(INFO, "OP_ENABLE_QUEUES received");
+               iavfbe_process_cmd_enable_queues(adapter, event->msg_buf,
+                                                event->msg_len);
+               iavfbe_notify_vf_link_status(adapter);
+               break;
+       case VIRTCHNL_OP_DISABLE_QUEUES:
+               IAVF_BE_LOG(INFO, "OP_DISABLE_QUEUE received");
+               iavfbe_process_cmd_disable_queues(adapter, event->msg_buf,
+                                                 event->msg_len);
+               break;
+       case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+               IAVF_BE_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
+               iavfbe_process_cmd_config_promisc_mode(adapter, event->msg_buf,
+                                                      event->msg_len);
+               break;
+       case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+               IAVF_BE_LOG(INFO, "VIRTCHNL_OP_CONFIG_IRQ_MAP received");
+               iavfbe_process_cmd_config_irq_map(adapter, event->msg_buf,
+                                                 event->msg_len);
+               break;
+       case VIRTCHNL_OP_ADD_ETH_ADDR:
+               IAVF_BE_LOG(INFO, "VIRTCHNL_OP_ADD_ETH_ADDR received");
+               iavfbe_process_cmd_add_ether_address(adapter, event->msg_buf,
+                                                    event->msg_len);
+               break;
+       case VIRTCHNL_OP_DEL_ETH_ADDR:
+               IAVF_BE_LOG(INFO, "VIRTCHNL_OP_DEL_ETH_ADDR received");
+               iavfbe_process_cmd_del_ether_address(adapter, event->msg_buf,
+                                                    event->msg_len);
+               break;
+       case VIRTCHNL_OP_GET_STATS:
+               IAVF_BE_LOG(INFO, "VIRTCHNL_OP_GET_STATS received");
+               iavfbe_process_cmd_get_stats(adapter, event->msg_buf,
+                                            event->msg_len);
+               break;
+       case VIRTCHNL_OP_ADD_VLAN:
+               IAVF_BE_LOG(INFO, "VIRTCHNL_OP_ADD_VLAN received");
+               iavfbe_process_cmd_add_vlan(adapter, event->msg_buf,
+                                           event->msg_len);
+               break;
+       case VIRTCHNL_OP_DEL_VLAN:
+               IAVF_BE_LOG(INFO, "VIRTCHNL_OP_ADD_VLAN received");
+               iavfbe_process_cmd_del_vlan(adapter, event->msg_buf,
+                                           event->msg_len);
+               break;
+       case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+               IAVF_BE_LOG(INFO, "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING received");
+               iavfbe_process_cmd_enable_vlan_strip(adapter);
+               break;
+       case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+               IAVF_BE_LOG(INFO, "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING 
received");
+               iavfbe_process_cmd_disable_vlan_strip(adapter);
+               break;
+       default:
+               IAVF_BE_LOG(ERR, "%u received, not supported", msg_opc);
+               iavfbe_send_msg_to_vf(adapter, msg_opc,
+                                     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+                                     NULL, 0);
+               break;
+       }
+}
+
 static inline int
 iavfbe_control_queue_remap(struct iavfbe_adapter *adapter,
                          struct iavfbe_control_q *asq,
@@ -269,7 +1091,7 @@ iavfbe_handle_virtchnl_msg(void *arg)
 
                switch (aq_opc) {
                case iavf_aqc_opc_send_msg_to_pf:
-                       /* Process msg from VF BE*/
+                       iavfbe_execute_vf_cmd(adapter, &info);
                        break;
                case iavf_aqc_opc_queue_shutdown:
                        iavfbe_reset_arq(adapter, true);
diff --git a/drivers/net/iavf_be/meson.build b/drivers/net/iavf_be/meson.build
index be13a2e492..e6b1c522a7 100644
--- a/drivers/net/iavf_be/meson.build
+++ b/drivers/net/iavf_be/meson.build
@@ -10,4 +10,5 @@ deps += ['bus_vdev', 'common_iavf', 'vfio_user', 'emu_iavf']
 sources = files(
        'iavf_be_ethdev.c',
        'iavf_be_vchnl.c',
+       'iavf_be_rxtx.c',
 )
-- 
2.21.1
